author     gbrandl <devnull@localhost>  2008-09-23 23:06:28 +0200
committer  gbrandl <devnull@localhost>  2008-09-23 23:06:28 +0200
commit     a34d0680ec7181202ee5adb4bf185ff1ffc9e0ed (patch)
tree       9eafc4f149b9a17216fd0d08e5e36ecbf5761335 /tests/test_basic_api.py
parent     6e735f945f1845566f9e5dbcf956b6d1c8f54916 (diff)
parent     420f7fa8594b96d6eed23073f0525a431218bb3e (diff)
download   pygments-a34d0680ec7181202ee5adb4bf185ff1ffc9e0ed.tar.gz
Merge with ben-file2open.
Diffstat (limited to 'tests/test_basic_api.py')
 tests/test_basic_api.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
index 760dda63..1168c7ad 100644
--- a/tests/test_basic_api.py
+++ b/tests/test_basic_api.py
@@ -17,6 +17,10 @@ from pygments.token import _TokenType, Text
 from pygments.lexer import RegexLexer
 from pygments.formatters.img import FontNotFound
 
+import support
+
+TESTFILE, TESTDIR = support.location(__file__)
+
 test_content = [chr(i) for i in xrange(33, 128)] * 5
 random.shuffle(test_content)
 test_content = ''.join(test_content) + '\n'
@@ -90,7 +94,7 @@ class FiltersTest(unittest.TestCase):
         for x in filters.FILTERS.keys():
             lx = lexers.PythonLexer()
             lx.add_filter(x, **filter_args.get(x, {}))
-            text = file(os.path.join(testdir, testfile)).read().decode('utf-8')
+            text = file(TESTFILE).read().decode('utf-8')
             tokens = list(lx.get_tokens(text))
             roundtext = ''.join([t[1] for t in tokens])
             if x not in ('whitespace', 'keywordcase'):
@@ -106,14 +110,14 @@ class FiltersTest(unittest.TestCase):
     def test_whitespace(self):
         lx = lexers.PythonLexer()
         lx.add_filter('whitespace', spaces='%')
-        text = file(os.path.join(testdir, testfile)).read().decode('utf-8')
+        text = file(TESTFILE).read().decode('utf-8')
         lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
         self.failIf(' ' in lxtext)
 
     def test_keywordcase(self):
         lx = lexers.PythonLexer()
         lx.add_filter('keywordcase', case='capitalize')
-        text = file(os.path.join(testdir, testfile)).read().decode('utf-8')
+        text = file(TESTFILE).read().decode('utf-8')
         lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
         self.assert_('Def' in lxtext and 'Class' in lxtext)
 
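
For context on the helper introduced above: the diff replaces the hand-built
os.path.join(testdir, testfile) lookups with a single TESTFILE constant
obtained from support.location(__file__). Below is a minimal sketch of what
such a helper could look like; it is an assumption for illustration, and the
real tests/support.py in Pygments may differ.

import os

def location(mod):
    # Hypothetical sketch, not the actual Pygments helper: given a test
    # module's __file__, return (testfile, testdir) -- the absolute path of
    # the module itself and the directory that contains it.
    testfile = os.path.abspath(mod)
    testdir = os.path.dirname(testfile)
    return testfile, testdir

With a helper along these lines, each test module computes TESTFILE and
TESTDIR once at import time instead of joining testdir and testfile in every
test method.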