path: root/libcpp/lex.c
author    Lorry Tar Creator <lorry-tar-importer@baserock.org>    2013-04-11 09:13:11 +0000
committer <>    2014-04-23 12:05:38 +0000
commit    6af3fdec2262dd94954acc5e426ef71cbd4521d3 (patch)
tree      9be02de9a80f7935892a2d03741adee44723e65d    /libcpp/lex.c
parent    19be2b4342ac32e9edc78ce6fed8f61b63ae98d1 (diff)
download  gcc-tarball-6af3fdec2262dd94954acc5e426ef71cbd4521d3.tar.gz
Imported from /home/lorry/working-area/delta_gcc-tarball/gcc-4.7.3.tar.bz2.gcc-4.7.3
Diffstat (limited to 'libcpp/lex.c')
-rw-r--r--    libcpp/lex.c    118
1 file changed, 96 insertions, 22 deletions
diff --git a/libcpp/lex.c b/libcpp/lex.c
index 6c3be71150..0ad9660a76 100644
--- a/libcpp/lex.c
+++ b/libcpp/lex.c
@@ -1,6 +1,6 @@
/* CPP Library - lexical analysis.
- Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010
- Free Software Foundation, Inc.
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010,
+ 2011 Free Software Foundation, Inc.
Contributed by Per Bothner, 1994-95.
Based on CCCP program by Paul Rubin, June 1986
Adapted to ANSI C, Richard Stallman, Jan 1987
@@ -477,7 +477,8 @@ search_line_sse42 (const uchar *s, const uchar *end)
typedef const uchar * (*search_line_fast_type) (const uchar *, const uchar *);
static search_line_fast_type search_line_fast;
-static void __attribute__((constructor))
+#define HAVE_init_vectorized_lexer 1
+static inline void
init_vectorized_lexer (void)
{
unsigned dummy, ecx = 0, edx = 0;
@@ -638,6 +639,16 @@ search_line_fast (const uchar *s, const uchar *end ATTRIBUTE_UNUSED)
#endif
+/* Initialize the lexer if needed. */
+
+void
+_cpp_init_lexer (void)
+{
+#ifdef HAVE_init_vectorized_lexer
+ init_vectorized_lexer ();
+#endif
+}
+
/* Returns with a logical line that contains no escaped newlines or
trigraphs. This is a time-critical inner loop. */
void
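The removal of __attribute__((constructor)) above, together with the new _cpp_init_lexer entry point, moves the CPUID probe that picks a search_line_fast implementation from program load time to an explicit call made during reader setup. A minimal sketch of the intended pattern (illustrative only; the real call site is elsewhere in libcpp, outside this file's diff):

  /* Hypothetical wrapper, not a libcpp function; _cpp_init_lexer itself is
     the function added by this patch (declared in libcpp's internal
     headers).  */
  extern void _cpp_init_lexer (void);

  static void
  reader_setup_sketch (void)
  {
    /* Runs init_vectorized_lexer () only when a vectorized
       search_line_fast was compiled in; otherwise this is a no-op.  */
    _cpp_init_lexer ();
  }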
@@ -1270,7 +1281,6 @@ static void
lex_raw_string (cpp_reader *pfile, cpp_token *token, const uchar *base,
const uchar *cur)
{
- source_location saw_NUL = 0;
const uchar *raw_prefix;
unsigned int raw_prefix_len = 0;
enum cpp_ttype type;
@@ -1476,15 +1486,20 @@ lex_raw_string (cpp_reader *pfile, cpp_token *token, const uchar *base,
cur = base = pfile->buffer->cur;
note = &pfile->buffer->notes[pfile->buffer->cur_note];
}
- else if (c == '\0' && !saw_NUL)
- LINEMAP_POSITION_FOR_COLUMN (saw_NUL, pfile->line_table,
- CPP_BUF_COLUMN (pfile->buffer, cur));
}
break_outer_loop:
- if (saw_NUL && !pfile->state.skipping)
- cpp_error_with_line (pfile, CPP_DL_WARNING, saw_NUL, 0,
- "null character(s) preserved in literal");
+ if (CPP_OPTION (pfile, user_literals))
+ {
+ /* Grab user defined literal suffix. */
+ if (ISIDST (*cur))
+ {
+ type = cpp_userdef_string_add_type (type);
+ ++cur;
+ }
+ while (ISIDNUM (*cur))
+ ++cur;
+ }
pfile->buffer->cur = cur;
if (first_buff == NULL)
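With the new user_literals option, lex_raw_string also consumes an identifier-like suffix immediately after the raw string's closing delimiter and promotes the token through cpp_userdef_string_add_type. An illustrative C++11 input of the kind this handles (the _re suffix and its operator are invented for the example, not part of the patch):

  #include <cstddef>

  // String-literal operator for the made-up suffix "_re".
  const char *operator"" _re (const char *s, std::size_t) { return s; }

  // R"(\d+\.\d+)"_re is lexed as a single string token, suffix included,
  // and marked as a user-defined string literal.
  const char *pat = R"(\d+\.\d+)"_re;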
@@ -1589,6 +1604,19 @@ lex_string (cpp_reader *pfile, cpp_token *token, const uchar *base)
cpp_error (pfile, CPP_DL_PEDWARN, "missing terminating %c character",
(int) terminator);
+ if (CPP_OPTION (pfile, user_literals))
+ {
+ /* Grab user defined literal suffix. */
+ if (ISIDST (*cur))
+ {
+ type = cpp_userdef_char_add_type (type);
+ type = cpp_userdef_string_add_type (type);
+ ++cur;
+ }
+ while (ISIDNUM (*cur))
+ ++cur;
+ }
+
pfile->buffer->cur = cur;
create_literal (pfile, token, base, cur - base, type);
}
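lex_string gains the same suffix handling for ordinary character and string literals; calling both cpp_userdef_char_add_type and cpp_userdef_string_add_type is harmless because each helper promotes only token types of its own family and returns any other type unchanged. Another illustrative C++11 fragment (suffixes and operators invented for the example):

  #include <cstddef>

  unsigned long long operator"" _u (char c) { return c; }
  std::size_t operator"" _len (const char *, std::size_t n) { return n; }

  auto ch  = 'x'_u;        // character literal: the _u suffix is part of the token
  auto len = "hello"_len;  // string literal: the _len suffix is part of the token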
@@ -1711,6 +1739,34 @@ next_tokenrun (tokenrun *run)
return run->next;
}
+/* Return the number of tokens not yet processed in a given

+ context. */
+int
+_cpp_remaining_tokens_num_in_context (cpp_context *context)
+{
+ if (context->tokens_kind == TOKENS_KIND_DIRECT)
+ return (LAST (context).token - FIRST (context).token);
+ else if (context->tokens_kind == TOKENS_KIND_INDIRECT
+ || context->tokens_kind == TOKENS_KIND_EXTENDED)
+ return (LAST (context).ptoken - FIRST (context).ptoken);
+ else
+ abort ();
+}
+
+/* Returns the token present at index INDEX in a given context. If
+ INDEX is zero, the next token to be processed is returned. */
+static const cpp_token*
+_cpp_token_from_context_at (cpp_context *context, int index)
+{
+ if (context->tokens_kind == TOKENS_KIND_DIRECT)
+ return &(FIRST (context).token[index]);
+ else if (context->tokens_kind == TOKENS_KIND_INDIRECT
+ || context->tokens_kind == TOKENS_KIND_EXTENDED)
+ return FIRST (context).ptoken[index];
+ else
+ abort ();
+}
+
/* Look ahead in the input stream. */
const cpp_token *
cpp_peek_token (cpp_reader *pfile, int index)
@@ -1722,15 +1778,10 @@ cpp_peek_token (cpp_reader *pfile, int index)
/* First, scan through any pending cpp_context objects. */
while (context->prev)
{
- ptrdiff_t sz = (context->direct_p
- ? LAST (context).token - FIRST (context).token
- : LAST (context).ptoken - FIRST (context).ptoken);
+ ptrdiff_t sz = _cpp_remaining_tokens_num_in_context (context);
if (index < (int) sz)
- return (context->direct_p
- ? FIRST (context).token + index
- : *(FIRST (context).ptoken + index));
-
+ return _cpp_token_from_context_at (context, index);
index -= (int) sz;
context = context->prev;
}
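cpp_peek_token now goes through the two helpers above instead of testing context->direct_p by hand, so the TOKENS_KIND_DIRECT versus indirect/extended distinction lives in one place. For reference, a hypothetical caller of the public lookahead interface (sketch only, the helper name is invented):

  #include "cpplib.h"

  /* Peek two tokens ahead without consuming them: does the input continue
     with "identifier (" ?  */
  static bool
  next_is_call_like (cpp_reader *pfile)
  {
    const cpp_token *t0 = cpp_peek_token (pfile, 0);
    const cpp_token *t1 = cpp_peek_token (pfile, 1);
    return t0->type == CPP_NAME && t1->type == CPP_OPEN_PAREN;
  }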
@@ -1983,8 +2034,11 @@ _cpp_lex_direct (cpp_reader *pfile)
}
c = *buffer->cur++;
- LINEMAP_POSITION_FOR_COLUMN (result->src_loc, pfile->line_table,
- CPP_BUF_COLUMN (buffer, buffer->cur));
+ if (pfile->forced_token_location_p)
+ result->src_loc = *pfile->forced_token_location_p;
+ else
+ result->src_loc = linemap_position_for_column (pfile->line_table,
+ CPP_BUF_COLUMN (buffer, buffer->cur));
switch (c)
{
@@ -2015,18 +2069,20 @@ _cpp_lex_direct (cpp_reader *pfile)
case 'R':
/* 'L', 'u', 'U', 'u8' or 'R' may introduce wide characters,
wide strings or raw strings. */
- if (c == 'L' || CPP_OPTION (pfile, uliterals))
+ if (c == 'L' || CPP_OPTION (pfile, rliterals)
+ || (c != 'R' && CPP_OPTION (pfile, uliterals)))
{
if ((*buffer->cur == '\'' && c != 'R')
|| *buffer->cur == '"'
|| (*buffer->cur == 'R'
&& c != 'R'
&& buffer->cur[1] == '"'
- && CPP_OPTION (pfile, uliterals))
+ && CPP_OPTION (pfile, rliterals))
|| (*buffer->cur == '8'
&& c == 'u'
&& (buffer->cur[1] == '"'
- || (buffer->cur[1] == 'R' && buffer->cur[2] == '"'))))
+ || (buffer->cur[1] == 'R' && buffer->cur[2] == '"'
+ && CPP_OPTION (pfile, rliterals)))))
{
lex_string (pfile, result, buffer->cur - 1);
break;
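In the prefix dispatch, raw-string forms are now gated on the new rliterals option instead of riding on uliterals, while plain L-prefixed literals remain recognized unconditionally. Illustrative C++11 inputs covering the forms this condition sorts out:

  const wchar_t  *a = L"wide";           // L prefix
  const char16_t *b = u"utf-16";         // u prefix (unicode literal)
  const char     *c = u8"utf-8";         // u8 prefix
  const char     *d = R"(raw \ text)";   // raw string: gated on rliterals
  const char     *e = u8R"(combined)";   // u8 + raw: raw form gated on rliterals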
@@ -2845,3 +2901,21 @@ cpp_token_val_index (cpp_token *tok)
return CPP_TOKEN_FLD_NONE;
}
}
+
+/* All tokens lexed in R after calling this function will be forced to have
+ their source_location the same as the location referenced by P, until
+ cpp_stop_forcing_token_locations is called for R. */
+
+void
+cpp_force_token_locations (cpp_reader *r, source_location *p)
+{
+ r->forced_token_location_p = p;
+}
+
+/* Go back to assigning locations naturally for lexed tokens. */
+
+void
+cpp_stop_forcing_token_locations (cpp_reader *r)
+{
+ r->forced_token_location_p = NULL;
+}
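The new pair of functions brackets a region in which _cpp_lex_direct (see the hunk above) stamps every freshly lexed token with the location referenced by P instead of computing one from the buffer position; the reader stores the pointer itself, so the pointed-to location must outlive the forcing region. A hypothetical use (sketch only; the wrapper and its caller are illustrative):

  #include "cpplib.h"

  /* Lex while attributing all new tokens to one fixed location LOC, e.g.
     to tie tokens of internally generated text back to the construct that
     produced it, then restore normal location assignment.  */
  static void
  lex_with_fixed_location (cpp_reader *pfile, source_location loc)
  {
    cpp_force_token_locations (pfile, &loc);

    const cpp_token *tok = cpp_get_token (pfile);  /* freshly lexed tokens
                                                      get src_loc == loc */
    (void) tok;

    cpp_stop_forcing_token_locations (pfile);
  }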