diff options
author | Berker Peksag <berker.peksag@gmail.com> | 2017-02-05 04:32:39 +0300 |
---|---|---|
committer | Berker Peksag <berker.peksag@gmail.com> | 2017-02-05 04:32:39 +0300 |
commit | 8e98360ead0753f5eafb02a9479bc4425aa1e330 (patch) | |
tree | 63ff1f2862afe41e1d0361b2be24835ad69f9e1a | |
parent | 391ed6c73cb1964b57d0b538370a76d03d30511b (diff) | |
download | cpython-8e98360ead0753f5eafb02a9479bc4425aa1e330.tar.gz |
Issue #28489: Fix comment in tokenizer.c
Patch by Ryan Gonzalez.
-rw-r--r-- | Parser/tokenizer.c | 2 |
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index 0fa3aebc0f..ff65f2a735 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -1508,7 +1508,7 @@ tok_get(struct tok_state *tok, char **p_start, char **p_end)
     /* Identifier (most frequent token!) */
     nonascii = 0;
     if (is_potential_identifier_start(c)) {
-        /* Process b"", r"", u"", br"" and rb"" */
+        /* Process the various legal combinations of b"", r"", u"", and f"". */
         int saw_b = 0, saw_r = 0, saw_u = 0, saw_f = 0;
         while (1) {
             if (!(saw_b || saw_u || saw_f) && (c == 'b' || c == 'B'))