author     gbrandl <devnull@localhost>    2008-09-24 18:14:55 +0200
committer  gbrandl <devnull@localhost>    2008-09-24 18:14:55 +0200
commit     938060f2faccba4abbae4c298e93a537296cf89c (patch)
tree       3422585d35ab955c5859be0f9d460f4ecc195eb6 /tests/test_basic_api.py
parent     d0634d959bb52e1378cf866acc53df731e069b17 (diff)
parent     d2e1a6638b3746142537e224b06fd842475798d0 (diff)
download   pygments-938060f2faccba4abbae4c298e93a537296cf89c.tar.gz
Merge with Tim.
Diffstat (limited to 'tests/test_basic_api.py')
-rw-r--r--  tests/test_basic_api.py  |  10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
index 760dda63..1168c7ad 100644
--- a/tests/test_basic_api.py
+++ b/tests/test_basic_api.py
@@ -17,6 +17,10 @@ from pygments.token import _TokenType, Text
 from pygments.lexer import RegexLexer
 from pygments.formatters.img import FontNotFound
 
+import support
+
+TESTFILE, TESTDIR = support.location(__file__)
+
 test_content = [chr(i) for i in xrange(33, 128)] * 5
 random.shuffle(test_content)
 test_content = ''.join(test_content) + '\n'
@@ -90,7 +94,7 @@ class FiltersTest(unittest.TestCase):
         for x in filters.FILTERS.keys():
             lx = lexers.PythonLexer()
             lx.add_filter(x, **filter_args.get(x, {}))
-            text = file(os.path.join(testdir, testfile)).read().decode('utf-8')
+            text = file(TESTFILE).read().decode('utf-8')
             tokens = list(lx.get_tokens(text))
             roundtext = ''.join([t[1] for t in tokens])
             if x not in ('whitespace', 'keywordcase'):
@@ -106,14 +110,14 @@ class FiltersTest(unittest.TestCase):
     def test_whitespace(self):
         lx = lexers.PythonLexer()
         lx.add_filter('whitespace', spaces='%')
-        text = file(os.path.join(testdir, testfile)).read().decode('utf-8')
+        text = file(TESTFILE).read().decode('utf-8')
         lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
         self.failIf(' ' in lxtext)
 
     def test_keywordcase(self):
         lx = lexers.PythonLexer()
         lx.add_filter('keywordcase', case='capitalize')
-        text = file(os.path.join(testdir, testfile)).read().decode('utf-8')
+        text = file(TESTFILE).read().decode('utf-8')
         lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
         self.assert_('Def' in lxtext and 'Class' in lxtext)
 
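
Note: the hunks above replace the repeated os.path.join(testdir, testfile) lookups with two module-level constants provided by a shared tests/support helper. That helper is not part of this diff, so the sketch below is only a guess at what support.location() might do, assuming it resolves the calling test module's own absolute path and directory. The location() name and the (file, directory) return order are taken from the new TESTFILE, TESTDIR assignment; everything else here is an assumption.

# tests/support.py -- hypothetical sketch, not the file shipped in the repository
import os

def location(mod_path):
    """Return (TESTFILE, TESTDIR): the absolute path of the calling test
    module and the directory containing it, so a test can read its own
    source as sample input regardless of the current working directory."""
    mod_path = os.path.abspath(mod_path)
    return mod_path, os.path.dirname(mod_path)

With a helper along these lines, file(TESTFILE).read().decode('utf-8') reads the test module's own source as sample Python code no matter where the test run is started from, which is what the simplified lines in the hunks rely on.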