author     Georg Brandl <georg@python.org>    2020-09-08 20:20:19 +0200
committer  GitHub <noreply@github.com>        2020-09-08 20:20:19 +0200
commit     9f5672672bd61f7149d2a165b49f0617a1a9fe8e
tree       ad3d62c5c167c6a75edf67a88c20341c77566c7e /tests/test_kotlin.py
parent     d9a9e9ee40eb9815ecc3d9ec9d6f5e57499009d2
download   pygments-git-9f5672672bd61f7149d2a165b49f0617a1a9fe8e.tar.gz
all: remove "u" string prefix (#1536)
* all: remove "u" string prefix
* util: remove unirange
Since Python 3.3, all builds are wide unicode compatible.
* unistring: remove support for narrow-unicode builds
which stopped being relevant with Python 3.3
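
Background on the two cleanup points in the message: on Python 3 the u"..." prefix is accepted (re-added by PEP 414 purely for 2.x compatibility) but is a no-op, and since PEP 393 landed in Python 3.3 every CPython build supports the full code-point range, so narrow-build workarounds such as unirange are dead code. Both claims can be checked from any Python 3 interpreter; a quick sketch, independent of Pygments itself:

    import sys

    # On Python 3 the "u" prefix changes nothing: both literals are plain str.
    assert u'fun `wo bble`' == 'fun `wo bble`'
    assert type(u'x') is str

    # Since PEP 393 (Python 3.3) there are no narrow builds: the full
    # code-point range is always available, so fallbacks keyed on
    # sys.maxunicode == 0xFFFF are unnecessary.
    assert sys.maxunicode == 0x10FFFF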
Diffstat (limited to 'tests/test_kotlin.py')
-rw-r--r--  tests/test_kotlin.py  158
1 file changed, 79 insertions(+), 79 deletions(-)
diff --git a/tests/test_kotlin.py b/tests/test_kotlin.py
index 2f0eb376..9b9e898d 100644
--- a/tests/test_kotlin.py
+++ b/tests/test_kotlin.py
@@ -19,115 +19,115 @@ def lexer():
 def test_can_cope_with_backtick_names_in_functions(lexer):
-    fragment = u'fun `wo bble`'
+    fragment = 'fun `wo bble`'
     tokens = [
-        (Keyword, u'fun'),
-        (Text, u' '),
-        (Name.Function, u'`wo bble`'),
-        (Text, u'\n')
+        (Keyword, 'fun'),
+        (Text, ' '),
+        (Name.Function, '`wo bble`'),
+        (Text, '\n')
     ]
     assert list(lexer.get_tokens(fragment)) == tokens


 def test_can_cope_with_commas_and_dashes_in_backtick_Names(lexer):
-    fragment = u'fun `wo,-bble`'
+    fragment = 'fun `wo,-bble`'
     tokens = [
-        (Keyword, u'fun'),
-        (Text, u' '),
-        (Name.Function, u'`wo,-bble`'),
-        (Text, u'\n')
+        (Keyword, 'fun'),
+        (Text, ' '),
+        (Name.Function, '`wo,-bble`'),
+        (Text, '\n')
     ]
     assert list(lexer.get_tokens(fragment)) == tokens


 def test_can_cope_with_destructuring(lexer):
-    fragment = u'val (a, b) = '
+    fragment = 'val (a, b) = '
     tokens = [
-        (Keyword, u'val'),
-        (Text, u' '),
-        (Punctuation, u'('),
-        (Name.Property, u'a'),
-        (Punctuation, u','),
-        (Text, u' '),
-        (Name.Property, u'b'),
-        (Punctuation, u')'),
-        (Text, u' '),
-        (Punctuation, u'='),
-        (Text, u' '),
-        (Text, u'\n')
+        (Keyword, 'val'),
+        (Text, ' '),
+        (Punctuation, '('),
+        (Name.Property, 'a'),
+        (Punctuation, ','),
+        (Text, ' '),
+        (Name.Property, 'b'),
+        (Punctuation, ')'),
+        (Text, ' '),
+        (Punctuation, '='),
+        (Text, ' '),
+        (Text, '\n')
     ]
     assert list(lexer.get_tokens(fragment)) == tokens


 def test_can_cope_generics_in_destructuring(lexer):
-    fragment = u'val (a: List<Something>, b: Set<Wobble>) ='
+    fragment = 'val (a: List<Something>, b: Set<Wobble>) ='
     tokens = [
-        (Keyword, u'val'),
-        (Text, u' '),
-        (Punctuation, u'('),
-        (Name.Property, u'a'),
-        (Punctuation, u':'),
-        (Text, u' '),
-        (Name.Property, u'List'),
-        (Punctuation, u'<'),
-        (Name, u'Something'),
-        (Punctuation, u'>'),
-        (Punctuation, u','),
-        (Text, u' '),
-        (Name.Property, u'b'),
-        (Punctuation, u':'),
-        (Text, u' '),
-        (Name.Property, u'Set'),
-        (Punctuation, u'<'),
-        (Name, u'Wobble'),
-        (Punctuation, u'>'),
-        (Punctuation, u')'),
-        (Text, u' '),
-        (Punctuation, u'='),
-        (Text, u'\n')
+        (Keyword, 'val'),
+        (Text, ' '),
+        (Punctuation, '('),
+        (Name.Property, 'a'),
+        (Punctuation, ':'),
+        (Text, ' '),
+        (Name.Property, 'List'),
+        (Punctuation, '<'),
+        (Name, 'Something'),
+        (Punctuation, '>'),
+        (Punctuation, ','),
+        (Text, ' '),
+        (Name.Property, 'b'),
+        (Punctuation, ':'),
+        (Text, ' '),
+        (Name.Property, 'Set'),
+        (Punctuation, '<'),
+        (Name, 'Wobble'),
+        (Punctuation, '>'),
+        (Punctuation, ')'),
+        (Text, ' '),
+        (Punctuation, '='),
+        (Text, '\n')
     ]
     assert list(lexer.get_tokens(fragment)) == tokens


 def test_can_cope_with_generics(lexer):
-    fragment = u'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
+    fragment = 'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
     tokens = [
-        (Keyword, u'inline fun'),
-        (Text, u' '),
-        (Punctuation, u'<'),
-        (Keyword, u'reified'),
-        (Text, u' '),
-        (Name, u'T'),
-        (Text, u' '),
-        (Punctuation, u':'),
-        (Text, u' '),
-        (Name, u'ContractState'),
-        (Punctuation, u'>'),
-        (Text, u' '),
-        (Name.Class, u'VaultService'),
-        (Punctuation, u'.'),
-        (Name.Function, u'queryBy'),
-        (Punctuation, u'('),
-        (Punctuation, u')'),
-        (Punctuation, u':'),
-        (Text, u' '),
-        (Name, u'Vault'),
-        (Punctuation, u'.'),
-        (Name, u'Page'),
-        (Punctuation, u'<'),
-        (Name, u'T'),
-        (Punctuation, u'>'),
-        (Text, u' '),
-        (Punctuation, u'{'),
-        (Text, u'\n')
+        (Keyword, 'inline fun'),
+        (Text, ' '),
+        (Punctuation, '<'),
+        (Keyword, 'reified'),
+        (Text, ' '),
+        (Name, 'T'),
+        (Text, ' '),
+        (Punctuation, ':'),
+        (Text, ' '),
+        (Name, 'ContractState'),
+        (Punctuation, '>'),
+        (Text, ' '),
+        (Name.Class, 'VaultService'),
+        (Punctuation, '.'),
+        (Name.Function, 'queryBy'),
+        (Punctuation, '('),
+        (Punctuation, ')'),
+        (Punctuation, ':'),
+        (Text, ' '),
+        (Name, 'Vault'),
+        (Punctuation, '.'),
+        (Name, 'Page'),
+        (Punctuation, '<'),
+        (Name, 'T'),
+        (Punctuation, '>'),
+        (Text, ' '),
+        (Punctuation, '{'),
+        (Text, '\n')
     ]
     assert list(lexer.get_tokens(fragment)) == tokens


 def test_should_cope_with_multiline_comments(lexer):
-    fragment = u'"""\nthis\nis\na\ncomment"""'
+    fragment = '"""\nthis\nis\na\ncomment"""'
     tokens = [
-        (String, u'"""\nthis\nis\na\ncomment"""'),
-        (Text, u'\n')
+        (String, '"""\nthis\nis\na\ncomment"""'),
+        (Text, '\n')
     ]
     assert list(lexer.get_tokens(fragment)) == tokens
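
For context: the lexer fixture named in the hunk header and in each test signature is defined earlier in tests/test_kotlin.py and is untouched by this diff. In the Pygments test suite such fixtures are typically module-scoped pytest fixtures along these lines; a sketch of the assumed surrounding file, not part of the change:

    import pytest

    from pygments.lexers import KotlinLexer
    from pygments.token import Keyword, Name, Punctuation, String, Text


    @pytest.fixture(scope='module')
    def lexer():
        # One shared KotlinLexer instance for every test in the module.
        yield KotlinLexer()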