summary | refs | log | tree | commit | diff
path: root/src/backend/regex/regc_pg_locale.c
diff options
context:
space:
mode:
author: Tom Lane <tgl@sss.pgh.pa.us> 2016-09-05 17:06:29 -0400
committer: Tom Lane <tgl@sss.pgh.pa.us> 2016-09-05 17:06:29 -0400
commit: c54159d44ceaba26ceda9fea1804f0de122a8f30 (patch)
tree: eaab96027f054cf5ff864d0745e446e8b8e13544 /src/backend/regex/regc_pg_locale.c
parent: f80049f76a32858601510eaaef19ab8160e4c9b3 (diff)
download: postgresql-c54159d44ceaba26ceda9fea1804f0de122a8f30.tar.gz
Make locale-dependent regex character classes work for large char codes.
Previously, we failed to recognize Unicode characters above U+7FF as being members of locale-dependent character classes such as [[:alpha:]]. (Actually, the same problem occurs for large pg_wchar values in any multibyte encoding, but UTF8 is the only case people have actually complained about.) It's impractical to get Spencer's original code to handle character classes or ranges containing many thousands of characters, because it insists on considering each member character individually at regex compile time, whether or not the character will ever be of interest at run time. To fix, choose a cutoff point MAX_SIMPLE_CHR below which we process characters individually as before, and deal with entire ranges or classes as single entities above that. We can actually make things cheaper than before for chars below the cutoff, because the color map can now be a simple linear array for those chars, rather than the multilevel tree structure Spencer designed. It's more expensive than before for chars above the cutoff, because we must do a binary search in a list of high chars and char ranges used in the regex pattern, plus call iswalpha() and friends for each locale-dependent character class used in the pattern. However, multibyte encodings are normally designed to give smaller codes to popular characters, so that we can expect that the slow path will be taken relatively infrequently. In any case, the speed penalty appears minor except when we have to apply iswalpha() etc. to high character codes at runtime --- and the previous coding gave wrong answers for those cases, so whether it was faster is moot. Tom Lane, reviewed by Heikki Linnakangas Discussion: <15563.1471913698@sss.pgh.pa.us>
Diffstat (limited to 'src/backend/regex/regc_pg_locale.c')
-rw-r--r--  src/backend/regex/regc_pg_locale.c  |  36
1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
index 551ae7dc08..ad9d4b1961 100644
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -736,7 +736,7 @@ store_match(pg_ctype_cache *pcc, pg_wchar chr1, int nchrs)
* Note that the result must not be freed or modified by caller.
*/
static struct cvec *
-pg_ctype_get_cache(pg_wc_probefunc probefunc)
+pg_ctype_get_cache(pg_wc_probefunc probefunc, int cclasscode)
{
pg_ctype_cache *pcc;
pg_wchar max_chr;
@@ -770,31 +770,43 @@ pg_ctype_get_cache(pg_wc_probefunc probefunc)
pcc->cv.ranges = (chr *) malloc(pcc->cv.rangespace * sizeof(chr) * 2);
if (pcc->cv.chrs == NULL || pcc->cv.ranges == NULL)
goto out_of_memory;
+ pcc->cv.cclasscode = cclasscode;
/*
- * Decide how many character codes we ought to look through. For C locale
- * there's no need to go further than 127. Otherwise, if the encoding is
- * UTF8 go up to 0x7FF, which is a pretty arbitrary cutoff but we cannot
- * extend it as far as we'd like (say, 0xFFFF, the end of the Basic
- * Multilingual Plane) without creating significant performance issues due
- * to too many characters being fed through the colormap code. This will
- * need redesign to fix reasonably, but at least for the moment we have
- * all common European languages covered. Otherwise (not C, not UTF8) go
- * up to 255. These limits are interrelated with restrictions discussed
- * at the head of this file.
+ * Decide how many character codes we ought to look through. In general
+ * we don't go past MAX_SIMPLE_CHR; chr codes above that are handled at
+ * runtime using the "high colormap" mechanism. However, in C locale
+ * there's no need to go further than 127, and if we only have a 1-byte
+ * <ctype.h> API there's no need to go further than that can handle.
+ *
+ * If it's not MAX_SIMPLE_CHR that's constraining the search, mark the
+ * output cvec as not having any locale-dependent behavior, since there
+ * will be no need to do any run-time locale checks. (The #if's here
+ * would always be true for production values of MAX_SIMPLE_CHR, but it's
+ * useful to allow it to be small for testing purposes.)
*/
switch (pg_regex_strategy)
{
case PG_REGEX_LOCALE_C:
+#if MAX_SIMPLE_CHR >= 127
max_chr = (pg_wchar) 127;
+ pcc->cv.cclasscode = -1;
+#else
+ max_chr = (pg_wchar) MAX_SIMPLE_CHR;
+#endif
break;
case PG_REGEX_LOCALE_WIDE:
case PG_REGEX_LOCALE_WIDE_L:
- max_chr = (pg_wchar) 0x7FF;
+ max_chr = (pg_wchar) MAX_SIMPLE_CHR;
break;
case PG_REGEX_LOCALE_1BYTE:
case PG_REGEX_LOCALE_1BYTE_L:
+#if MAX_SIMPLE_CHR >= UCHAR_MAX
max_chr = (pg_wchar) UCHAR_MAX;
+ pcc->cv.cclasscode = -1;
+#else
+ max_chr = (pg_wchar) MAX_SIMPLE_CHR;
+#endif
break;
default:
max_chr = 0; /* can't get here, but keep compiler quiet */