summaryrefslogtreecommitdiff
path: root/tests/test_r.py
diff options
context:
space:
mode:
authorGeorg Brandl <georg@python.org>2020-09-08 20:20:19 +0200
committerGitHub <noreply@github.com>2020-09-08 20:20:19 +0200
commit9f5672672bd61f7149d2a165b49f0617a1a9fe8e (patch)
treead3d62c5c167c6a75edf67a88c20341c77566c7e /tests/test_r.py
parentd9a9e9ee40eb9815ecc3d9ec9d6f5e57499009d2 (diff)
downloadpygments-git-9f5672672bd61f7149d2a165b49f0617a1a9fe8e.tar.gz
all: remove "u" string prefix (#1536)
* all: remove "u" string prefix
* util: remove unirange — since Python 3.3, all builds are wide-unicode compatible.
* unistring: remove support for narrow-unicode builds, which stopped being relevant with Python 3.3
Diffstat (limited to 'tests/test_r.py')
-rw-r--r--  tests/test_r.py  76
1 file changed, 38 insertions, 38 deletions
diff --git a/tests/test_r.py b/tests/test_r.py
index 2814acd7..c243652b 100644
--- a/tests/test_r.py
+++ b/tests/test_r.py
@@ -19,76 +19,76 @@ def lexer():
def test_call(lexer):
- fragment = u'f(1, a)\n'
+ fragment = 'f(1, a)\n'
tokens = [
- (Name.Function, u'f'),
- (Punctuation, u'('),
- (Token.Literal.Number, u'1'),
- (Punctuation, u','),
- (Token.Text, u' '),
- (Token.Name, u'a'),
- (Punctuation, u')'),
- (Token.Text, u'\n'),
+ (Name.Function, 'f'),
+ (Punctuation, '('),
+ (Token.Literal.Number, '1'),
+ (Punctuation, ','),
+ (Token.Text, ' '),
+ (Token.Name, 'a'),
+ (Punctuation, ')'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_name1(lexer):
- fragment = u'._a_2.c'
+ fragment = '._a_2.c'
tokens = [
- (Name, u'._a_2.c'),
- (Token.Text, u'\n'),
+ (Name, '._a_2.c'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_name2(lexer):
# Invalid names are valid if backticks are used
- fragment = u'`.1 blah`'
+ fragment = '`.1 blah`'
tokens = [
- (Name, u'`.1 blah`'),
- (Token.Text, u'\n'),
+ (Name, '`.1 blah`'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_name3(lexer):
# Internal backticks can be escaped
- fragment = u'`.1 \\` blah`'
+ fragment = '`.1 \\` blah`'
tokens = [
- (Name, u'`.1 \\` blah`'),
- (Token.Text, u'\n'),
+ (Name, '`.1 \\` blah`'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_custom_operator(lexer):
- fragment = u'7 % and % 8'
+ fragment = '7 % and % 8'
tokens = [
- (Token.Literal.Number, u'7'),
- (Token.Text, u' '),
- (Token.Operator, u'% and %'),
- (Token.Text, u' '),
- (Token.Literal.Number, u'8'),
- (Token.Text, u'\n'),
+ (Token.Literal.Number, '7'),
+ (Token.Text, ' '),
+ (Token.Operator, '% and %'),
+ (Token.Text, ' '),
+ (Token.Literal.Number, '8'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_indexing(lexer):
- fragment = u'a[1]'
+ fragment = 'a[1]'
tokens = [
- (Token.Name, u'a'),
- (Token.Punctuation, u'['),
- (Token.Literal.Number, u'1'),
- (Token.Punctuation, u']'),
- (Token.Text, u'\n'),
+ (Token.Name, 'a'),
+ (Token.Punctuation, '['),
+ (Token.Literal.Number, '1'),
+ (Token.Punctuation, ']'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
def test_dot_name(lexer):
- fragment = u'. <- 1'
+ fragment = '. <- 1'
tokens = [
(Token.Name, '.'),
(Token.Text, ' '),
@@ -101,12 +101,12 @@ def test_dot_name(lexer):
def test_dot_indexing(lexer):
- fragment = u'.[1]'
+ fragment = '.[1]'
tokens = [
- (Token.Name, u'.'),
- (Token.Punctuation, u'['),
- (Token.Literal.Number, u'1'),
- (Token.Punctuation, u']'),
- (Token.Text, u'\n'),
+ (Token.Name, '.'),
+ (Token.Punctuation, '['),
+ (Token.Literal.Number, '1'),
+ (Token.Punctuation, ']'),
+ (Token.Text, '\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens