Diffstat (limited to 'compat')
62 files changed, 25740 insertions, 0 deletions
diff --git a/compat/apple-common-crypto.h b/compat/apple-common-crypto.h new file mode 100644 index 0000000000..c8b9b0e1a6 --- /dev/null +++ b/compat/apple-common-crypto.h @@ -0,0 +1,86 @@ +/* suppress inclusion of conflicting openssl functions */ +#define OPENSSL_NO_MD5 +#define HEADER_HMAC_H +#define HEADER_SHA_H +#include <CommonCrypto/CommonHMAC.h> +#define HMAC_CTX CCHmacContext +#define HMAC_Init(hmac, key, len, algo) CCHmacInit(hmac, algo, key, len) +#define HMAC_Update CCHmacUpdate +#define HMAC_Final(hmac, hash, ptr) CCHmacFinal(hmac, hash) +#define HMAC_CTX_cleanup(ignore) +#define EVP_md5(...) kCCHmacAlgMD5 +#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070 +#define APPLE_LION_OR_NEWER +#include <Security/Security.h> +/* Apple's TYPE_BOOL conflicts with config.c */ +#undef TYPE_BOOL +#endif + +#ifdef APPLE_LION_OR_NEWER +#define git_CC_error_check(pattern, err) \ + do { \ + if (err) { \ + die(pattern, (long)CFErrorGetCode(err)); \ + } \ + } while(0) + +#define EVP_EncodeBlock git_CC_EVP_EncodeBlock +static inline int git_CC_EVP_EncodeBlock(unsigned char *out, + const unsigned char *in, int inlen) +{ + CFErrorRef err; + SecTransformRef encoder; + CFDataRef input, output; + CFIndex length; + + encoder = SecEncodeTransformCreate(kSecBase64Encoding, &err); + git_CC_error_check("SecEncodeTransformCreate failed: %ld", err); + + input = CFDataCreate(kCFAllocatorDefault, in, inlen); + SecTransformSetAttribute(encoder, kSecTransformInputAttributeName, + input, &err); + git_CC_error_check("SecTransformSetAttribute failed: %ld", err); + + output = SecTransformExecute(encoder, &err); + git_CC_error_check("SecTransformExecute failed: %ld", err); + + length = CFDataGetLength(output); + CFDataGetBytes(output, CFRangeMake(0, length), out); + + CFRelease(output); + CFRelease(input); + CFRelease(encoder); + + return (int)strlen((const char *)out); +} + +#define EVP_DecodeBlock git_CC_EVP_DecodeBlock +static inline int git_CC_EVP_DecodeBlock(unsigned char *out, + const unsigned char *in, int inlen) +{ + CFErrorRef err; + SecTransformRef decoder; + CFDataRef input, output; + CFIndex length; + + decoder = SecDecodeTransformCreate(kSecBase64Encoding, &err); + git_CC_error_check("SecDecodeTransformCreate failed: %ld", err); + + input = CFDataCreate(kCFAllocatorDefault, in, inlen); + SecTransformSetAttribute(decoder, kSecTransformInputAttributeName, + input, &err); + git_CC_error_check("SecTransformSetAttribute failed: %ld", err); + + output = SecTransformExecute(decoder, &err); + git_CC_error_check("SecTransformExecute failed: %ld", err); + + length = CFDataGetLength(output); + CFDataGetBytes(output, CFRangeMake(0, length), out); + + CFRelease(output); + CFRelease(input); + CFRelease(decoder); + + return (int)strlen((const char *)out); +} +#endif /* APPLE_LION_OR_NEWER */ diff --git a/compat/basename.c b/compat/basename.c new file mode 100644 index 0000000000..d8f8a3c6dc --- /dev/null +++ b/compat/basename.c @@ -0,0 +1,15 @@ +#include "../git-compat-util.h" + +/* Adapted from libiberty's basename.c. */ +char *gitbasename (char *path) +{ + const char *base; + /* Skip over the disk name in MSDOS pathnames. */ + if (has_dos_drive_prefix(path)) + path += 2; + for (base = path; *path; path++) { + if (is_dir_sep(*path)) + base = path + 1; + } + return (char *)base; +} diff --git a/compat/bswap.h b/compat/bswap.h new file mode 100644 index 0000000000..f6fd9a6a6c --- /dev/null +++ b/compat/bswap.h @@ -0,0 +1,175 @@ +/* + * Let's make sure we always have a sane definition for ntohl()/htonl().
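/*
 * [Editor's note] A minimal caller sketch, not part of the patch: with
 * the macro mapping above, unmodified OpenSSL-style HMAC code compiles
 * straight against CommonCrypto on macOS. hmac_md5_example() and its
 * parameter names are hypothetical.
 */
static void hmac_md5_example(const void *key, size_t keylen,
                             const void *data, size_t datalen,
                             unsigned char digest[16])
{
        HMAC_CTX ctx;                            /* really a CCHmacContext */
        HMAC_Init(&ctx, key, keylen, EVP_md5()); /* CCHmacInit(&ctx, kCCHmacAlgMD5, key, keylen) */
        HMAC_Update(&ctx, data, datalen);        /* CCHmacUpdate() */
        HMAC_Final(&ctx, digest, NULL);          /* CCHmacFinal(&ctx, digest) */
        HMAC_CTX_cleanup(&ctx);                  /* expands to nothing here */
}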
+ * Some libraries define those as a function call, just to perform byte + * shifting, bringing significant overhead to what should be a simple + * operation. + */ + +/* + * Default version that the compiler ought to optimize properly with + * constant values. + */ +static inline uint32_t default_swab32(uint32_t val) +{ + return (((val & 0xff000000) >> 24) | + ((val & 0x00ff0000) >> 8) | + ((val & 0x0000ff00) << 8) | + ((val & 0x000000ff) << 24)); +} + +static inline uint64_t default_bswap64(uint64_t val) +{ + return (((val & (uint64_t)0x00000000000000ffULL) << 56) | + ((val & (uint64_t)0x000000000000ff00ULL) << 40) | + ((val & (uint64_t)0x0000000000ff0000ULL) << 24) | + ((val & (uint64_t)0x00000000ff000000ULL) << 8) | + ((val & (uint64_t)0x000000ff00000000ULL) >> 8) | + ((val & (uint64_t)0x0000ff0000000000ULL) >> 24) | + ((val & (uint64_t)0x00ff000000000000ULL) >> 40) | + ((val & (uint64_t)0xff00000000000000ULL) >> 56)); +} + +#undef bswap32 +#undef bswap64 + +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + +#define bswap32 git_bswap32 +static inline uint32_t git_bswap32(uint32_t x) +{ + uint32_t result; + if (__builtin_constant_p(x)) + result = default_swab32(x); + else + __asm__("bswap %0" : "=r" (result) : "0" (x)); + return result; +} + +#define bswap64 git_bswap64 +#if defined(__x86_64__) +static inline uint64_t git_bswap64(uint64_t x) +{ + uint64_t result; + if (__builtin_constant_p(x)) + result = default_bswap64(x); + else + __asm__("bswap %q0" : "=r" (result) : "0" (x)); + return result; +} +#else +static inline uint64_t git_bswap64(uint64_t x) +{ + union { uint64_t i64; uint32_t i32[2]; } tmp, result; + if (__builtin_constant_p(x)) + result.i64 = default_bswap64(x); + else { + tmp.i64 = x; + result.i32[0] = git_bswap32(tmp.i32[1]); + result.i32[1] = git_bswap32(tmp.i32[0]); + } + return result.i64; +} +#endif + +#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)) + +#include <stdlib.h> + +#define bswap32(x) _byteswap_ulong(x) +#define bswap64(x) _byteswap_uint64(x) + +#endif + +#if defined(bswap32) + +#undef ntohl +#undef htonl +#define ntohl(x) bswap32(x) +#define htonl(x) bswap32(x) + +#endif + +#if defined(bswap64) + +#undef ntohll +#undef htonll +#define ntohll(x) bswap64(x) +#define htonll(x) bswap64(x) + +#else + +#undef ntohll +#undef htonll + +#if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) + +# define GIT_BYTE_ORDER __BYTE_ORDER +# define GIT_LITTLE_ENDIAN __LITTLE_ENDIAN +# define GIT_BIG_ENDIAN __BIG_ENDIAN + +#elif defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && defined(BIG_ENDIAN) + +# define GIT_BYTE_ORDER BYTE_ORDER +# define GIT_LITTLE_ENDIAN LITTLE_ENDIAN +# define GIT_BIG_ENDIAN BIG_ENDIAN + +#else + +# define GIT_BIG_ENDIAN 4321 +# define GIT_LITTLE_ENDIAN 1234 + +# if defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) +# define GIT_BYTE_ORDER GIT_BIG_ENDIAN +# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) +# define GIT_BYTE_ORDER GIT_LITTLE_ENDIAN +# else +# error "Cannot determine endianness" +# endif + +#endif + +#if GIT_BYTE_ORDER == GIT_BIG_ENDIAN +# define ntohll(n) (n) +# define htonll(n) (n) +#else +# define ntohll(n) default_bswap64(n) +# define htonll(n) default_bswap64(n) +#endif + +#endif + +/* + * Performance might be improved if the CPU architecture is OK with + * unaligned 32-bit loads and a fast ntohl() is available. + * Otherwise fall back to byte loads and shifts which is portable, + * and is faster on architectures with memory alignment issues. 
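/*
 * [Editor's note] Sanity values, not part of the patch, for the helpers
 * above:
 *
 *   default_swab32(0x12345678)             == 0x78563412
 *   default_bswap64(0x0123456789abcdefULL) == 0xefcdab8967452301ULL
 *
 * On little-endian builds ntohll()/htonll() resolve to a 64-bit swap,
 * so ntohll(htonll(x)) == x; on big-endian builds both are the
 * identity. Likewise get_be32() (below) reads big-endian regardless of
 * host order: get_be32("\x12\x34\x56\x78") == 0x12345678 on both the
 * fast-ntohl path and the byte-shift fallback.
 */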
+ */ + +#if defined(__i386__) || defined(__x86_64__) || \ + defined(_M_IX86) || defined(_M_X64) || \ + defined(__ppc__) || defined(__ppc64__) || \ + defined(__powerpc__) || defined(__powerpc64__) || \ + defined(__s390__) || defined(__s390x__) + +#define get_be16(p) ntohs(*(unsigned short *)(p)) +#define get_be32(p) ntohl(*(unsigned int *)(p)) +#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0) + +#else + +#define get_be16(p) ( \ + (*((unsigned char *)(p) + 0) << 8) | \ + (*((unsigned char *)(p) + 1) << 0) ) +#define get_be32(p) ( \ + (*((unsigned char *)(p) + 0) << 24) | \ + (*((unsigned char *)(p) + 1) << 16) | \ + (*((unsigned char *)(p) + 2) << 8) | \ + (*((unsigned char *)(p) + 3) << 0) ) +#define put_be32(p, v) do { \ + unsigned int __v = (v); \ + *((unsigned char *)(p) + 0) = __v >> 24; \ + *((unsigned char *)(p) + 1) = __v >> 16; \ + *((unsigned char *)(p) + 2) = __v >> 8; \ + *((unsigned char *)(p) + 3) = __v >> 0; } while (0) + +#endif diff --git a/compat/fopen.c b/compat/fopen.c new file mode 100644 index 0000000000..b5ca142fed --- /dev/null +++ b/compat/fopen.c @@ -0,0 +1,37 @@ +/* + * The order of the following two lines is important. + * + * FREAD_READS_DIRECTORIES is undefined before including git-compat-util.h + * to avoid the redefinition of fopen within git-compat-util.h. This is + * necessary since fopen is a macro on some platforms which may be set + * based on compiler options. For example, on AIX fopen is set to fopen64 + * when _LARGE_FILES is defined. The previous technique of merely undefining + * fopen after including git-compat-util.h is inadequate in this case. + */ +#undef FREAD_READS_DIRECTORIES +#include "../git-compat-util.h" + +FILE *git_fopen(const char *path, const char *mode) +{ + FILE *fp; + struct stat st; + + if (mode[0] == 'w' || mode[0] == 'a') + return fopen(path, mode); + + if (!(fp = fopen(path, mode))) + return NULL; + + if (fstat(fileno(fp), &st)) { + fclose(fp); + return NULL; + } + + if (S_ISDIR(st.st_mode)) { + fclose(fp); + errno = EISDIR; + return NULL; + } + + return fp; +} diff --git a/compat/gmtime.c b/compat/gmtime.c new file mode 100644 index 0000000000..e8362dd2b9 --- /dev/null +++ b/compat/gmtime.c @@ -0,0 +1,29 @@ +#include "../git-compat-util.h" +#undef gmtime +#undef gmtime_r + +struct tm *git_gmtime(const time_t *timep) +{ + static struct tm result; + return git_gmtime_r(timep, &result); +} + +struct tm *git_gmtime_r(const time_t *timep, struct tm *result) +{ + struct tm *ret; + + memset(result, 0, sizeof(*result)); + ret = gmtime_r(timep, result); + + /* + * Rather than NULL, FreeBSD gmtime simply leaves the "struct tm" + * untouched when it encounters overflow. Since "mday" cannot otherwise + * be zero, we can test this very quickly. 
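/*
 * [Editor's note] Elaborating the test below, not part of the patch:
 * "result" is memset() to zero before the call above, and every
 * successful gmtime_r() stores a day-of-month in the range 1..31, so
 * tm_mday == 0 after the call can only mean the FreeBSD overflow case
 * in which the struct was never written.
 */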
+ */ + if (ret && !ret->tm_mday) { + ret = NULL; + errno = EOVERFLOW; + } + + return ret; +} diff --git a/compat/hstrerror.c b/compat/hstrerror.c new file mode 100644 index 0000000000..069c555da4 --- /dev/null +++ b/compat/hstrerror.c @@ -0,0 +1,21 @@ +#include <string.h> +#include <stdio.h> +#include <netdb.h> + +const char *githstrerror(int err) +{ + static char buffer[48]; + switch (err) + { + case HOST_NOT_FOUND: + return "Authoritative answer: host not found"; + case NO_DATA: + return "Valid name, no data record of requested type"; + case NO_RECOVERY: + return "Non recoverable errors, FORMERR, REFUSED, NOTIMP"; + case TRY_AGAIN: + return "Non-authoritative \"host not found\", or SERVERFAIL"; + } + sprintf(buffer, "Name resolution error %d", err); + return buffer; +} diff --git a/compat/inet_ntop.c b/compat/inet_ntop.c new file mode 100644 index 0000000000..90b7cc45f3 --- /dev/null +++ b/compat/inet_ntop.c @@ -0,0 +1,185 @@ +/* + * Copyright (c) 1996-1999 by Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS + * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE + * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS + * SOFTWARE. + */ + +#include "../git-compat-util.h" + +#ifndef NS_INADDRSZ +#define NS_INADDRSZ 4 +#endif +#ifndef NS_IN6ADDRSZ +#define NS_IN6ADDRSZ 16 +#endif +#ifndef NS_INT16SZ +#define NS_INT16SZ 2 +#endif + +/* + * WARNING: Don't even consider trying to compile this on a system where + * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. + */ + +/* const char * + * inet_ntop4(src, dst, size) + * format an IPv4 address + * return: + * `dst' (as a const) + * notes: + * (1) uses no statics + * (2) takes a u_char* not an in_addr as input + * author: + * Paul Vixie, 1996. + */ +static const char * +inet_ntop4(const u_char *src, char *dst, size_t size) +{ + static const char fmt[] = "%u.%u.%u.%u"; + char tmp[sizeof "255.255.255.255"]; + int nprinted; + + nprinted = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]); + if (nprinted < 0) + return (NULL); /* we assume "errno" was set by "snprintf()" */ + if ((size_t)nprinted > size) { + errno = ENOSPC; + return (NULL); + } + strcpy(dst, tmp); + return (dst); +} + +#ifndef NO_IPV6 +/* const char * + * inet_ntop6(src, dst, size) + * convert IPv6 binary address into presentation (printable) format + * author: + * Paul Vixie, 1996. + */ +static const char * +inet_ntop6(const u_char *src, char *dst, size_t size) +{ + /* + * Note that int32_t and int16_t need only be "at least" large enough + * to contain a value of the specified size. On some systems, like + * Crays, there is no such thing as an integer variable with 16 bits. + * Keep this in mind if you think this function should have been coded + * to use pointer overlays. All the world's not a VAX. 
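/*
 * [Editor's note] Usage sketch for inet_ntop4() above, not part of the
 * patch; it follows the POSIX contract of returning dst on success and
 * NULL with errno = ENOSPC when the buffer is too small. The helper
 * name is hypothetical and printf() comes via git-compat-util.h.
 */
static void print_ipv4_example(const unsigned char quad[4])
{
        char buf[sizeof "255.255.255.255"];
        if (inet_ntop4(quad, buf, sizeof(buf)))
                printf("%s\n", buf);    /* e.g. {127,0,0,1} -> "127.0.0.1" */
}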
+ */ + char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp; + struct { int base, len; } best, cur; + unsigned int words[NS_IN6ADDRSZ / NS_INT16SZ]; + int i; + + /* + * Preprocess: + * Copy the input (bytewise) array into a wordwise array. + * Find the longest run of 0x00's in src[] for :: shorthanding. + */ + memset(words, '\0', sizeof words); + for (i = 0; i < NS_IN6ADDRSZ; i++) + words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3)); + best.base = -1; + best.len = 0; + cur.base = -1; + cur.len = 0; + for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { + if (words[i] == 0) { + if (cur.base == -1) + cur.base = i, cur.len = 1; + else + cur.len++; + } else { + if (cur.base != -1) { + if (best.base == -1 || cur.len > best.len) + best = cur; + cur.base = -1; + } + } + } + if (cur.base != -1) { + if (best.base == -1 || cur.len > best.len) + best = cur; + } + if (best.base != -1 && best.len < 2) + best.base = -1; + + /* + * Format the result. + */ + tp = tmp; + for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { + /* Are we inside the best run of 0x00's? */ + if (best.base != -1 && i >= best.base && + i < (best.base + best.len)) { + if (i == best.base) + *tp++ = ':'; + continue; + } + /* Are we following an initial run of 0x00s or any real hex? */ + if (i != 0) + *tp++ = ':'; + /* Is this address an encapsulated IPv4? */ + if (i == 6 && best.base == 0 && + (best.len == 6 || (best.len == 5 && words[5] == 0xffff))) { + if (!inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp))) + return (NULL); + tp += strlen(tp); + break; + } + tp += snprintf(tp, sizeof tmp - (tp - tmp), "%x", words[i]); + } + /* Was it a trailing run of 0x00's? */ + if (best.base != -1 && (best.base + best.len) == + (NS_IN6ADDRSZ / NS_INT16SZ)) + *tp++ = ':'; + *tp++ = '\0'; + + /* + * Check for overflow, copy, and we're done. + */ + if ((size_t)(tp - tmp) > size) { + errno = ENOSPC; + return (NULL); + } + strcpy(dst, tmp); + return (dst); +} +#endif + +/* char * + * inet_ntop(af, src, dst, size) + * convert a network format address to presentation format. + * return: + * pointer to presentation format address (`dst'), or NULL (see errno). + * author: + * Paul Vixie, 1996. + */ +const char * +inet_ntop(int af, const void *src, char *dst, size_t size) +{ + switch (af) { + case AF_INET: + return (inet_ntop4(src, dst, size)); +#ifndef NO_IPV6 + case AF_INET6: + return (inet_ntop6(src, dst, size)); +#endif + default: + errno = EAFNOSUPPORT; + return (NULL); + } + /* NOTREACHED */ +} diff --git a/compat/inet_pton.c b/compat/inet_pton.c new file mode 100644 index 0000000000..2b9a0a4e22 --- /dev/null +++ b/compat/inet_pton.c @@ -0,0 +1,215 @@ +/* + * Copyright (C) 1996-2001 Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM + * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, + * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING + * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
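/*
 * [Editor's note] Worked examples, not part of the patch, of the
 * "best run of zeros" compression implemented by inet_ntop6() above:
 *
 *   0:0:0:0:0:0:0:1          ->  ::1
 *   2001:db8:0:0:1:0:0:1     ->  2001:db8::1:0:0:1    (first longest run wins)
 *   0:0:0:0:0:ffff:c000:280  ->  ::ffff:192.0.2.128   (v4-mapped special case)
 */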
+ */ + +#include "../git-compat-util.h" + +#ifndef NS_INT16SZ +#define NS_INT16SZ 2 +#endif + +#ifndef NS_INADDRSZ +#define NS_INADDRSZ 4 +#endif + +#ifndef NS_IN6ADDRSZ +#define NS_IN6ADDRSZ 16 +#endif + +/* + * WARNING: Don't even consider trying to compile this on a system where + * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. + */ + +static int inet_pton4(const char *src, unsigned char *dst); +#ifndef NO_IPV6 +static int inet_pton6(const char *src, unsigned char *dst); +#endif + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. + * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[NS_INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + if ((pch = strchr(digits, ch)) != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return (0); + *tp = new; + if (! saw_digit) { + if (++octets > 4) + return (0); + saw_digit = 1; + } + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return (0); + *++tp = 0; + saw_digit = 0; + } else + return (0); + } + if (octets < 4) + return (0); + memcpy(dst, tmp, NS_INADDRSZ); + return (1); +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. + */ + +#ifndef NO_IPV6 +static int +inet_pton6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[NS_IN6ADDRSZ], *tp, *endp, *colonp; + const char *xdigits, *curtok; + int ch, saw_xdigit; + unsigned int val; + + memset((tp = tmp), '\0', NS_IN6ADDRSZ); + endp = tp + NS_IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. */ + if (*src == ':') + if (*++src != ':') + return (0); + curtok = src; + saw_xdigit = 0; + val = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return (0); + saw_xdigit = 1; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return (0); + colonp = tp; + continue; + } + if (tp + NS_INT16SZ > endp) + return (0); + *tp++ = (unsigned char) (val >> 8) & 0xff; + *tp++ = (unsigned char) val & 0xff; + saw_xdigit = 0; + val = 0; + continue; + } + if (ch == '.' && ((tp + NS_INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += NS_INADDRSZ; + saw_xdigit = 0; + break; /* '\0' was seen by inet_pton4(). */ + } + return (0); + } + if (saw_xdigit) { + if (tp + NS_INT16SZ > endp) + return (0); + *tp++ = (unsigned char) (val >> 8) & 0xff; + *tp++ = (unsigned char) val & 0xff; + } + if (colonp != NULL) { + /* + * Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. 
+ */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[- i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return (0); + memcpy(dst, tmp, NS_IN6ADDRSZ); + return (1); +} +#endif + +/* int + * isc_net_pton(af, src, dst) + * convert from presentation format (which usually means ASCII printable) + * to network format (which is usually some kind of binary format). + * return: + * 1 if the address was valid for the specified address family + * 0 if the address wasn't valid (`dst' is untouched in this case) + * -1 if some other error occurred (`dst' is untouched in this case, too) + * author: + * Paul Vixie, 1996. + */ +int +inet_pton(int af, const char *src, void *dst) +{ + switch (af) { + case AF_INET: + return (inet_pton4(src, dst)); +#ifndef NO_IPV6 + case AF_INET6: + return (inet_pton6(src, dst)); +#endif + default: + errno = EAFNOSUPPORT; + return (-1); + } + /* NOTREACHED */ +} diff --git a/compat/memmem.c b/compat/memmem.c new file mode 100644 index 0000000000..56bcb4277f --- /dev/null +++ b/compat/memmem.c @@ -0,0 +1,32 @@ +#include "../git-compat-util.h" + +void *gitmemmem(const void *haystack, size_t haystack_len, + const void *needle, size_t needle_len) +{ + const char *begin = haystack; + const char *last_possible = begin + haystack_len - needle_len; + const char *tail = needle; + char point; + + /* + * The first occurrence of the empty string is deemed to occur at + * the beginning of the string. + */ + if (needle_len == 0) + return (void *)begin; + + /* + * Sanity check, otherwise the loop might search through the whole + * memory. + */ + if (haystack_len < needle_len) + return NULL; + + point = *tail++; + for (; begin <= last_possible; begin++) { + if (*begin == point && !memcmp(begin + 1, tail, needle_len - 1)) + return (void *)begin; + } + + return NULL; +} diff --git a/compat/mingw.c b/compat/mingw.c new file mode 100644 index 0000000000..c5c37e53ce --- /dev/null +++ b/compat/mingw.c @@ -0,0 +1,2130 @@ +#include "../git-compat-util.h" +#include "win32.h" +#include <conio.h> +#include <wchar.h> +#include "../strbuf.h" +#include "../run-command.h" +#include "../cache.h" + +static const int delay[] = { 0, 1, 10, 20, 40 }; + +int err_win_to_posix(DWORD winerr) +{ + int error = ENOSYS; + switch(winerr) { + case ERROR_ACCESS_DENIED: error = EACCES; break; + case ERROR_ACCOUNT_DISABLED: error = EACCES; break; + case ERROR_ACCOUNT_RESTRICTION: error = EACCES; break; + case ERROR_ALREADY_ASSIGNED: error = EBUSY; break; + case ERROR_ALREADY_EXISTS: error = EEXIST; break; + case ERROR_ARITHMETIC_OVERFLOW: error = ERANGE; break; + case ERROR_BAD_COMMAND: error = EIO; break; + case ERROR_BAD_DEVICE: error = ENODEV; break; + case ERROR_BAD_DRIVER_LEVEL: error = ENXIO; break; + case ERROR_BAD_EXE_FORMAT: error = ENOEXEC; break; + case ERROR_BAD_FORMAT: error = ENOEXEC; break; + case ERROR_BAD_LENGTH: error = EINVAL; break; + case ERROR_BAD_PATHNAME: error = ENOENT; break; + case ERROR_BAD_PIPE: error = EPIPE; break; + case ERROR_BAD_UNIT: error = ENODEV; break; + case ERROR_BAD_USERNAME: error = EINVAL; break; + case ERROR_BROKEN_PIPE: error = EPIPE; break; + case ERROR_BUFFER_OVERFLOW: error = ENAMETOOLONG; break; + case ERROR_BUSY: error = EBUSY; break; + case ERROR_BUSY_DRIVE: error = EBUSY; break; + case ERROR_CALL_NOT_IMPLEMENTED: error = ENOSYS; break; + case ERROR_CANNOT_MAKE: error = EACCES; break; + case ERROR_CANTOPEN: error = EIO; break; + case ERROR_CANTREAD: error = EIO; break; + case ERROR_CANTWRITE: error = EIO; break; 
+ case ERROR_CRC: error = EIO; break; + case ERROR_CURRENT_DIRECTORY: error = EACCES; break; + case ERROR_DEVICE_IN_USE: error = EBUSY; break; + case ERROR_DEV_NOT_EXIST: error = ENODEV; break; + case ERROR_DIRECTORY: error = EINVAL; break; + case ERROR_DIR_NOT_EMPTY: error = ENOTEMPTY; break; + case ERROR_DISK_CHANGE: error = EIO; break; + case ERROR_DISK_FULL: error = ENOSPC; break; + case ERROR_DRIVE_LOCKED: error = EBUSY; break; + case ERROR_ENVVAR_NOT_FOUND: error = EINVAL; break; + case ERROR_EXE_MARKED_INVALID: error = ENOEXEC; break; + case ERROR_FILENAME_EXCED_RANGE: error = ENAMETOOLONG; break; + case ERROR_FILE_EXISTS: error = EEXIST; break; + case ERROR_FILE_INVALID: error = ENODEV; break; + case ERROR_FILE_NOT_FOUND: error = ENOENT; break; + case ERROR_GEN_FAILURE: error = EIO; break; + case ERROR_HANDLE_DISK_FULL: error = ENOSPC; break; + case ERROR_INSUFFICIENT_BUFFER: error = ENOMEM; break; + case ERROR_INVALID_ACCESS: error = EACCES; break; + case ERROR_INVALID_ADDRESS: error = EFAULT; break; + case ERROR_INVALID_BLOCK: error = EFAULT; break; + case ERROR_INVALID_DATA: error = EINVAL; break; + case ERROR_INVALID_DRIVE: error = ENODEV; break; + case ERROR_INVALID_EXE_SIGNATURE: error = ENOEXEC; break; + case ERROR_INVALID_FLAGS: error = EINVAL; break; + case ERROR_INVALID_FUNCTION: error = ENOSYS; break; + case ERROR_INVALID_HANDLE: error = EBADF; break; + case ERROR_INVALID_LOGON_HOURS: error = EACCES; break; + case ERROR_INVALID_NAME: error = EINVAL; break; + case ERROR_INVALID_OWNER: error = EINVAL; break; + case ERROR_INVALID_PARAMETER: error = EINVAL; break; + case ERROR_INVALID_PASSWORD: error = EPERM; break; + case ERROR_INVALID_PRIMARY_GROUP: error = EINVAL; break; + case ERROR_INVALID_SIGNAL_NUMBER: error = EINVAL; break; + case ERROR_INVALID_TARGET_HANDLE: error = EIO; break; + case ERROR_INVALID_WORKSTATION: error = EACCES; break; + case ERROR_IO_DEVICE: error = EIO; break; + case ERROR_IO_INCOMPLETE: error = EINTR; break; + case ERROR_LOCKED: error = EBUSY; break; + case ERROR_LOCK_VIOLATION: error = EACCES; break; + case ERROR_LOGON_FAILURE: error = EACCES; break; + case ERROR_MAPPED_ALIGNMENT: error = EINVAL; break; + case ERROR_META_EXPANSION_TOO_LONG: error = E2BIG; break; + case ERROR_MORE_DATA: error = EPIPE; break; + case ERROR_NEGATIVE_SEEK: error = ESPIPE; break; + case ERROR_NOACCESS: error = EFAULT; break; + case ERROR_NONE_MAPPED: error = EINVAL; break; + case ERROR_NOT_ENOUGH_MEMORY: error = ENOMEM; break; + case ERROR_NOT_READY: error = EAGAIN; break; + case ERROR_NOT_SAME_DEVICE: error = EXDEV; break; + case ERROR_NO_DATA: error = EPIPE; break; + case ERROR_NO_MORE_SEARCH_HANDLES: error = EIO; break; + case ERROR_NO_PROC_SLOTS: error = EAGAIN; break; + case ERROR_NO_SUCH_PRIVILEGE: error = EACCES; break; + case ERROR_OPEN_FAILED: error = EIO; break; + case ERROR_OPEN_FILES: error = EBUSY; break; + case ERROR_OPERATION_ABORTED: error = EINTR; break; + case ERROR_OUTOFMEMORY: error = ENOMEM; break; + case ERROR_PASSWORD_EXPIRED: error = EACCES; break; + case ERROR_PATH_BUSY: error = EBUSY; break; + case ERROR_PATH_NOT_FOUND: error = ENOENT; break; + case ERROR_PIPE_BUSY: error = EBUSY; break; + case ERROR_PIPE_CONNECTED: error = EPIPE; break; + case ERROR_PIPE_LISTENING: error = EPIPE; break; + case ERROR_PIPE_NOT_CONNECTED: error = EPIPE; break; + case ERROR_PRIVILEGE_NOT_HELD: error = EACCES; break; + case ERROR_READ_FAULT: error = EIO; break; + case ERROR_SEEK: error = EIO; break; + case ERROR_SEEK_ON_DEVICE: error = ESPIPE; break; + case 
ERROR_SHARING_BUFFER_EXCEEDED: error = ENFILE; break; + case ERROR_SHARING_VIOLATION: error = EACCES; break; + case ERROR_STACK_OVERFLOW: error = ENOMEM; break; + case ERROR_SWAPERROR: error = ENOENT; break; + case ERROR_TOO_MANY_MODULES: error = EMFILE; break; + case ERROR_TOO_MANY_OPEN_FILES: error = EMFILE; break; + case ERROR_UNRECOGNIZED_MEDIA: error = ENXIO; break; + case ERROR_UNRECOGNIZED_VOLUME: error = ENODEV; break; + case ERROR_WAIT_NO_CHILDREN: error = ECHILD; break; + case ERROR_WRITE_FAULT: error = EIO; break; + case ERROR_WRITE_PROTECT: error = EROFS; break; + } + return error; +} + +static inline int is_file_in_use_error(DWORD errcode) +{ + switch (errcode) { + case ERROR_SHARING_VIOLATION: + case ERROR_ACCESS_DENIED: + return 1; + } + + return 0; +} + +static int read_yes_no_answer(void) +{ + char answer[1024]; + + if (fgets(answer, sizeof(answer), stdin)) { + size_t answer_len = strlen(answer); + int got_full_line = 0, c; + + /* remove the newline */ + if (answer_len >= 2 && answer[answer_len-2] == '\r') { + answer[answer_len-2] = '\0'; + got_full_line = 1; + } else if (answer_len >= 1 && answer[answer_len-1] == '\n') { + answer[answer_len-1] = '\0'; + got_full_line = 1; + } + /* flush the buffer in case we did not get the full line */ + if (!got_full_line) + while ((c = getchar()) != EOF && c != '\n') + ; + } else + /* we could not read, return the + * default answer which is no */ + return 0; + + if (tolower(answer[0]) == 'y' && !answer[1]) + return 1; + if (!strncasecmp(answer, "yes", sizeof(answer))) + return 1; + if (tolower(answer[0]) == 'n' && !answer[1]) + return 0; + if (!strncasecmp(answer, "no", sizeof(answer))) + return 0; + + /* did not find an answer we understand */ + return -1; +} + +static int ask_yes_no_if_possible(const char *format, ...) +{ + char question[4096]; + const char *retry_hook[] = { NULL, NULL, NULL }; + va_list args; + + va_start(args, format); + vsnprintf(question, sizeof(question), format, args); + va_end(args); + + if ((retry_hook[0] = mingw_getenv("GIT_ASK_YESNO"))) { + retry_hook[1] = question; + return !run_command_v_opt(retry_hook, 0); + } + + if (!isatty(_fileno(stdin)) || !isatty(_fileno(stderr))) + return 0; + + while (1) { + int answer; + fprintf(stderr, "%s (y/n) ", question); + + if ((answer = read_yes_no_answer()) >= 0) + return answer; + + fprintf(stderr, "Sorry, I did not understand your answer. " + "Please type 'y' or 'n'\n"); + } +} + +int mingw_unlink(const char *pathname) +{ + int ret, tries = 0; + wchar_t wpathname[MAX_PATH]; + if (xutftowcs_path(wpathname, pathname) < 0) + return -1; + + /* read-only files cannot be removed */ + _wchmod(wpathname, 0666); + while ((ret = _wunlink(wpathname)) == -1 && tries < ARRAY_SIZE(delay)) { + if (!is_file_in_use_error(GetLastError())) + break; + /* + * We assume that some other process had the source or + * destination file open at the wrong moment and retry. + * In order to give the other process a higher chance to + * complete its operation, we give up our time slice now. + * If we have to retry again, we do sleep a bit. + */ + Sleep(delay[tries]); + tries++; + } + while (ret == -1 && is_file_in_use_error(GetLastError()) && + ask_yes_no_if_possible("Unlink of file '%s' failed. 
" + "Should I try again?", pathname)) + ret = _wunlink(wpathname); + return ret; +} + +static int is_dir_empty(const wchar_t *wpath) +{ + WIN32_FIND_DATAW findbuf; + HANDLE handle; + wchar_t wbuf[MAX_PATH + 2]; + wcscpy(wbuf, wpath); + wcscat(wbuf, L"\\*"); + handle = FindFirstFileW(wbuf, &findbuf); + if (handle == INVALID_HANDLE_VALUE) + return GetLastError() == ERROR_NO_MORE_FILES; + + while (!wcscmp(findbuf.cFileName, L".") || + !wcscmp(findbuf.cFileName, L"..")) + if (!FindNextFileW(handle, &findbuf)) { + DWORD err = GetLastError(); + FindClose(handle); + return err == ERROR_NO_MORE_FILES; + } + FindClose(handle); + return 0; +} + +int mingw_rmdir(const char *pathname) +{ + int ret, tries = 0; + wchar_t wpathname[MAX_PATH]; + if (xutftowcs_path(wpathname, pathname) < 0) + return -1; + + while ((ret = _wrmdir(wpathname)) == -1 && tries < ARRAY_SIZE(delay)) { + if (!is_file_in_use_error(GetLastError())) + errno = err_win_to_posix(GetLastError()); + if (errno != EACCES) + break; + if (!is_dir_empty(wpathname)) { + errno = ENOTEMPTY; + break; + } + /* + * We assume that some other process had the source or + * destination file open at the wrong moment and retry. + * In order to give the other process a higher chance to + * complete its operation, we give up our time slice now. + * If we have to retry again, we do sleep a bit. + */ + Sleep(delay[tries]); + tries++; + } + while (ret == -1 && errno == EACCES && is_file_in_use_error(GetLastError()) && + ask_yes_no_if_possible("Deletion of directory '%s' failed. " + "Should I try again?", pathname)) + ret = _wrmdir(wpathname); + return ret; +} + +int mingw_mkdir(const char *path, int mode) +{ + int ret; + wchar_t wpath[MAX_PATH]; + if (xutftowcs_path(wpath, path) < 0) + return -1; + ret = _wmkdir(wpath); + return ret; +} + +int mingw_open (const char *filename, int oflags, ...) 
+{ + va_list args; + unsigned mode; + int fd; + wchar_t wfilename[MAX_PATH]; + + va_start(args, oflags); + mode = va_arg(args, int); + va_end(args); + + if (filename && !strcmp(filename, "/dev/null")) + filename = "nul"; + + if (xutftowcs_path(wfilename, filename) < 0) + return -1; + fd = _wopen(wfilename, oflags, mode); + + if (fd < 0 && (oflags & O_CREAT) && errno == EACCES) { + DWORD attrs = GetFileAttributesW(wfilename); + if (attrs != INVALID_FILE_ATTRIBUTES && (attrs & FILE_ATTRIBUTE_DIRECTORY)) + errno = EISDIR; + } + return fd; +} + +static BOOL WINAPI ctrl_ignore(DWORD type) +{ + return TRUE; +} + +#undef fgetc +int mingw_fgetc(FILE *stream) +{ + int ch; + if (!isatty(_fileno(stream))) + return fgetc(stream); + + SetConsoleCtrlHandler(ctrl_ignore, TRUE); + while (1) { + ch = fgetc(stream); + if (ch != EOF || GetLastError() != ERROR_OPERATION_ABORTED) + break; + + /* Ctrl+C was pressed, simulate SIGINT and retry */ + mingw_raise(SIGINT); + } + SetConsoleCtrlHandler(ctrl_ignore, FALSE); + return ch; +} + +#undef fopen +FILE *mingw_fopen (const char *filename, const char *otype) +{ + FILE *file; + wchar_t wfilename[MAX_PATH], wotype[4]; + if (filename && !strcmp(filename, "/dev/null")) + filename = "nul"; + if (xutftowcs_path(wfilename, filename) < 0 || + xutftowcs(wotype, otype, ARRAY_SIZE(wotype)) < 0) + return NULL; + file = _wfopen(wfilename, wotype); + return file; +} + +FILE *mingw_freopen (const char *filename, const char *otype, FILE *stream) +{ + FILE *file; + wchar_t wfilename[MAX_PATH], wotype[4]; + if (filename && !strcmp(filename, "/dev/null")) + filename = "nul"; + if (xutftowcs_path(wfilename, filename) < 0 || + xutftowcs(wotype, otype, ARRAY_SIZE(wotype)) < 0) + return NULL; + file = _wfreopen(wfilename, wotype, stream); + return file; +} + +#undef fflush +int mingw_fflush(FILE *stream) +{ + int ret = fflush(stream); + + /* + * write() is used behind the scenes of stdio output functions. + * Since git code does not check for errors after each stdio write + * operation, it can happen that write() is called by a later + * stdio function even if an earlier write() call failed. In the + * case of a pipe whose readable end was closed, only the first + * call to write() reports EPIPE on Windows. Subsequent write() + * calls report EINVAL. It is impossible to notice whether this + * fflush invocation triggered such a case, therefore, we have to + * catch all EINVAL errors whole-sale. + */ + if (ret && errno == EINVAL) + errno = EPIPE; + + return ret; +} + +int mingw_access(const char *filename, int mode) +{ + wchar_t wfilename[MAX_PATH]; + if (xutftowcs_path(wfilename, filename) < 0) + return -1; + /* X_OK is not supported by the MSVCRT version */ + return _waccess(wfilename, mode & ~X_OK); +} + +int mingw_chdir(const char *dirname) +{ + wchar_t wdirname[MAX_PATH]; + if (xutftowcs_path(wdirname, dirname) < 0) + return -1; + return _wchdir(wdirname); +} + +int mingw_chmod(const char *filename, int mode) +{ + wchar_t wfilename[MAX_PATH]; + if (xutftowcs_path(wfilename, filename) < 0) + return -1; + return _wchmod(wfilename, mode); +} + +/* + * The unit of FILETIME is 100-nanoseconds since January 1, 1601, UTC. + * Returns the 100-nanoseconds ("hekto nanoseconds") since the epoch. 
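/*
 * [Editor's note] The magic constant below, spelled out: 1601-01-01 to
 * 1970-01-01 is 134774 days = 11644473600 seconds, i.e.
 * 116444736000000000 ticks of 100ns, which is exactly the value
 * subtracted in filetime_to_hnsec().
 */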
+ */ +static inline long long filetime_to_hnsec(const FILETIME *ft) +{ + long long winTime = ((long long)ft->dwHighDateTime << 32) + ft->dwLowDateTime; + /* Windows to Unix Epoch conversion */ + return winTime - 116444736000000000LL; +} + +static inline time_t filetime_to_time_t(const FILETIME *ft) +{ + return (time_t)(filetime_to_hnsec(ft) / 10000000); +} + +/* We keep the do_lstat code in a separate function to avoid recursion. + * When a path ends with a slash, the stat will fail with ENOENT. In + * this case, we strip the trailing slashes and stat again. + * + * If follow is true then act like stat() and report on the link + * target. Otherwise report on the link itself. + */ +static int do_lstat(int follow, const char *file_name, struct stat *buf) +{ + WIN32_FILE_ATTRIBUTE_DATA fdata; + wchar_t wfilename[MAX_PATH]; + if (xutftowcs_path(wfilename, file_name) < 0) + return -1; + + if (GetFileAttributesExW(wfilename, GetFileExInfoStandard, &fdata)) { + buf->st_ino = 0; + buf->st_gid = 0; + buf->st_uid = 0; + buf->st_nlink = 1; + buf->st_mode = file_attr_to_st_mode(fdata.dwFileAttributes); + buf->st_size = fdata.nFileSizeLow | + (((off_t)fdata.nFileSizeHigh)<<32); + buf->st_dev = buf->st_rdev = 0; /* not used by Git */ + buf->st_atime = filetime_to_time_t(&(fdata.ftLastAccessTime)); + buf->st_mtime = filetime_to_time_t(&(fdata.ftLastWriteTime)); + buf->st_ctime = filetime_to_time_t(&(fdata.ftCreationTime)); + if (fdata.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) { + WIN32_FIND_DATAW findbuf; + HANDLE handle = FindFirstFileW(wfilename, &findbuf); + if (handle != INVALID_HANDLE_VALUE) { + if ((findbuf.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) && + (findbuf.dwReserved0 == IO_REPARSE_TAG_SYMLINK)) { + if (follow) { + char buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; + buf->st_size = readlink(file_name, buffer, MAXIMUM_REPARSE_DATA_BUFFER_SIZE); + } else { + buf->st_mode = S_IFLNK; + } + buf->st_mode |= S_IREAD; + if (!(findbuf.dwFileAttributes & FILE_ATTRIBUTE_READONLY)) + buf->st_mode |= S_IWRITE; + } + FindClose(handle); + } + } + return 0; + } + switch (GetLastError()) { + case ERROR_ACCESS_DENIED: + case ERROR_SHARING_VIOLATION: + case ERROR_LOCK_VIOLATION: + case ERROR_SHARING_BUFFER_EXCEEDED: + errno = EACCES; + break; + case ERROR_BUFFER_OVERFLOW: + errno = ENAMETOOLONG; + break; + case ERROR_NOT_ENOUGH_MEMORY: + errno = ENOMEM; + break; + default: + errno = ENOENT; + break; + } + return -1; +} + +/* We provide our own lstat/fstat functions, since the provided + * lstat/fstat functions are so slow. These stat functions are + * tailored for Git's usage (read: fast), and are not meant to be + * complete. Note that Git stat()s are redirected to mingw_lstat() + * too, since Windows doesn't really handle symlinks that well. 
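/*
 * [Editor's note] Behavior sketch, not part of the patch: on Windows,
 * GetFileAttributesExW() fails on a path with a trailing slash, so the
 * retry logic in do_stat_internal() below strips the slashes and stats
 * again, making both spellings equivalent:
 *
 *   mingw_stat("some-dir", &st)   == 0
 *   mingw_stat("some-dir/", &st)  == 0   (via the retry)
 */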
+ */ +static int do_stat_internal(int follow, const char *file_name, struct stat *buf) +{ + int namelen; + char alt_name[PATH_MAX]; + + if (!do_lstat(follow, file_name, buf)) + return 0; + + /* if file_name ended in a '/', Windows returned ENOENT; + * try again without trailing slashes + */ + if (errno != ENOENT) + return -1; + + namelen = strlen(file_name); + if (namelen && file_name[namelen-1] != '/') + return -1; + while (namelen && file_name[namelen-1] == '/') + --namelen; + if (!namelen || namelen >= PATH_MAX) + return -1; + + memcpy(alt_name, file_name, namelen); + alt_name[namelen] = 0; + return do_lstat(follow, alt_name, buf); +} + +int mingw_lstat(const char *file_name, struct stat *buf) +{ + return do_stat_internal(0, file_name, buf); +} +int mingw_stat(const char *file_name, struct stat *buf) +{ + return do_stat_internal(1, file_name, buf); +} + +int mingw_fstat(int fd, struct stat *buf) +{ + HANDLE fh = (HANDLE)_get_osfhandle(fd); + BY_HANDLE_FILE_INFORMATION fdata; + + if (fh == INVALID_HANDLE_VALUE) { + errno = EBADF; + return -1; + } + /* direct non-file handles to MS's fstat() */ + if (GetFileType(fh) != FILE_TYPE_DISK) + return _fstati64(fd, buf); + + if (GetFileInformationByHandle(fh, &fdata)) { + buf->st_ino = 0; + buf->st_gid = 0; + buf->st_uid = 0; + buf->st_nlink = 1; + buf->st_mode = file_attr_to_st_mode(fdata.dwFileAttributes); + buf->st_size = fdata.nFileSizeLow | + (((off_t)fdata.nFileSizeHigh)<<32); + buf->st_dev = buf->st_rdev = 0; /* not used by Git */ + buf->st_atime = filetime_to_time_t(&(fdata.ftLastAccessTime)); + buf->st_mtime = filetime_to_time_t(&(fdata.ftLastWriteTime)); + buf->st_ctime = filetime_to_time_t(&(fdata.ftCreationTime)); + return 0; + } + errno = EBADF; + return -1; +} + +static inline void time_t_to_filetime(time_t t, FILETIME *ft) +{ + long long winTime = t * 10000000LL + 116444736000000000LL; + ft->dwLowDateTime = winTime; + ft->dwHighDateTime = winTime >> 32; +} + +int mingw_utime (const char *file_name, const struct utimbuf *times) +{ + FILETIME mft, aft; + int fh, rc; + DWORD attrs; + wchar_t wfilename[MAX_PATH]; + if (xutftowcs_path(wfilename, file_name) < 0) + return -1; + + /* must have write permission */ + attrs = GetFileAttributesW(wfilename); + if (attrs != INVALID_FILE_ATTRIBUTES && + (attrs & FILE_ATTRIBUTE_READONLY)) { + /* ignore errors here; open() will report them */ + SetFileAttributesW(wfilename, attrs & ~FILE_ATTRIBUTE_READONLY); + } + + if ((fh = _wopen(wfilename, O_RDWR | O_BINARY)) < 0) { + rc = -1; + goto revert_attrs; + } + + if (times) { + time_t_to_filetime(times->modtime, &mft); + time_t_to_filetime(times->actime, &aft); + } else { + GetSystemTimeAsFileTime(&mft); + aft = mft; + } + if (!SetFileTime((HANDLE)_get_osfhandle(fh), NULL, &aft, &mft)) { + errno = EINVAL; + rc = -1; + } else + rc = 0; + close(fh); + +revert_attrs: + if (attrs != INVALID_FILE_ATTRIBUTES && + (attrs & FILE_ATTRIBUTE_READONLY)) { + /* ignore errors again */ + SetFileAttributesW(wfilename, attrs); + } + return rc; +} + +unsigned int sleep (unsigned int seconds) +{ + Sleep(seconds*1000); + return 0; +} + +char *mingw_mktemp(char *template) +{ + wchar_t wtemplate[MAX_PATH]; + if (xutftowcs_path(wtemplate, template) < 0) + return NULL; + if (!_wmktemp(wtemplate)) + return NULL; + if (xwcstoutf(template, wtemplate, strlen(template) + 1) < 0) + return NULL; + return template; +} + +int mkstemp(char *template) +{ + char *filename = mktemp(template); + if (filename == NULL) + return -1; + return open(filename, O_RDWR | O_CREAT, 0600); +} + +int 
gettimeofday(struct timeval *tv, void *tz) +{ + FILETIME ft; + long long hnsec; + + GetSystemTimeAsFileTime(&ft); + hnsec = filetime_to_hnsec(&ft); + tv->tv_sec = hnsec / 10000000; + tv->tv_usec = (hnsec % 10000000) / 10; + return 0; +} + +int pipe(int filedes[2]) +{ + HANDLE h[2]; + + /* this creates non-inheritable handles */ + if (!CreatePipe(&h[0], &h[1], NULL, 8192)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + filedes[0] = _open_osfhandle((int)h[0], O_NOINHERIT); + if (filedes[0] < 0) { + CloseHandle(h[0]); + CloseHandle(h[1]); + return -1; + } + filedes[1] = _open_osfhandle((int)h[1], O_NOINHERIT); + if (filedes[1] < 0) { + close(filedes[0]); + CloseHandle(h[1]); + return -1; + } + return 0; +} + +struct tm *gmtime_r(const time_t *timep, struct tm *result) +{ + /* gmtime() in MSVCRT.DLL is thread-safe, but not reentrant */ + memcpy(result, gmtime(timep), sizeof(struct tm)); + return result; +} + +struct tm *localtime_r(const time_t *timep, struct tm *result) +{ + /* localtime() in MSVCRT.DLL is thread-safe, but not reentrant */ + memcpy(result, localtime(timep), sizeof(struct tm)); + return result; +} + +char *mingw_getcwd(char *pointer, int len) +{ + int i; + wchar_t wpointer[MAX_PATH]; + if (!_wgetcwd(wpointer, ARRAY_SIZE(wpointer))) + return NULL; + if (xwcstoutf(pointer, wpointer, len) < 0) + return NULL; + for (i = 0; pointer[i]; i++) + if (pointer[i] == '\\') + pointer[i] = '/'; + return pointer; +} + +/* + * See http://msdn2.microsoft.com/en-us/library/17w5ykft(vs.71).aspx + * (Parsing C++ Command-Line Arguments) + */ +static const char *quote_arg(const char *arg) +{ + /* count chars to quote */ + int len = 0, n = 0; + int force_quotes = 0; + char *q, *d; + const char *p = arg; + if (!*p) force_quotes = 1; + while (*p) { + if (isspace(*p) || *p == '*' || *p == '?' || *p == '{' || *p == '\'') + force_quotes = 1; + else if (*p == '"') + n++; + else if (*p == '\\') { + int count = 0; + while (*p == '\\') { + count++; + p++; + len++; + } + if (*p == '"') + n += count*2 + 1; + continue; + } + len++; + p++; + } + if (!force_quotes && n == 0) + return arg; + + /* insert \ where necessary */ + d = q = xmalloc(len+n+3); + *d++ = '"'; + while (*arg) { + if (*arg == '"') + *d++ = '\\'; + else if (*arg == '\\') { + int count = 0; + while (*arg == '\\') { + count++; + *d++ = *arg++; + } + if (*arg == '"') { + while (count-- > 0) + *d++ = '\\'; + *d++ = '\\'; + } + } + *d++ = *arg++; + } + *d++ = '"'; + *d++ = 0; + return q; +} + +static const char *parse_interpreter(const char *cmd) +{ + static char buf[100]; + char *p, *opt; + int n, fd; + + /* don't even try a .exe */ + n = strlen(cmd); + if (n >= 4 && !strcasecmp(cmd+n-4, ".exe")) + return NULL; + + fd = open(cmd, O_RDONLY); + if (fd < 0) + return NULL; + n = read(fd, buf, sizeof(buf)-1); + close(fd); + if (n < 4) /* at least '#!/x' and not error */ + return NULL; + + if (buf[0] != '#' || buf[1] != '!') + return NULL; + buf[n] = '\0'; + p = buf + strcspn(buf, "\r\n"); + if (!*p) + return NULL; + + *p = '\0'; + if (!(p = strrchr(buf+2, '/')) && !(p = strrchr(buf+2, '\\'))) + return NULL; + /* strip options */ + if ((opt = strchr(p+1, ' '))) + *opt = '\0'; + return p+1; +} + +/* + * Splits the PATH into parts.
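/*
 * [Editor's note] Worked examples, not part of the patch, of
 * quote_arg() above under the MSVCRT parsing rules it targets:
 *
 *   abc      ->  abc          (no metacharacters: returned as-is)
 *   a b      ->  "a b"
 *   a"b      ->  "a\"b"
 *   a\b      ->  a\b          (backslash not before a quote: untouched)
 *   a\"b     ->  "a\\\"b"     (run doubled, then the quote escaped)
 *   (empty)  ->  ""           (empty argument forces quoting)
 */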
+ */ +static char **get_path_split(void) +{ + char *p, **path, *envpath = mingw_getenv("PATH"); + int i, n = 0; + + if (!envpath || !*envpath) + return NULL; + + envpath = xstrdup(envpath); + p = envpath; + while (p) { + char *dir = p; + p = strchr(p, ';'); + if (p) *p++ = '\0'; + if (*dir) { /* not earlier, catches series of ; */ + ++n; + } + } + if (!n) + return NULL; + + path = xmalloc((n+1)*sizeof(char *)); + p = envpath; + i = 0; + do { + if (*p) + path[i++] = xstrdup(p); + p = p+strlen(p)+1; + } while (i < n); + path[i] = NULL; + + free(envpath); + + return path; +} + +static void free_path_split(char **path) +{ + char **p = path; + + if (!path) + return; + + while (*p) + free(*p++); + free(path); +} + +/* + * exe_only means that we only want to detect .exe files, but not scripts + * (which do not have an extension) + */ +static char *lookup_prog(const char *dir, const char *cmd, int isexe, int exe_only) +{ + char path[MAX_PATH]; + snprintf(path, sizeof(path), "%s/%s.exe", dir, cmd); + + if (!isexe && access(path, F_OK) == 0) + return xstrdup(path); + path[strlen(path)-4] = '\0'; + if ((!exe_only || isexe) && access(path, F_OK) == 0) + if (!(GetFileAttributes(path) & FILE_ATTRIBUTE_DIRECTORY)) + return xstrdup(path); + return NULL; +} + +/* + * Determines the absolute path of cmd using the split path in path. + * If cmd contains a slash or backslash, no lookup is performed. + */ +static char *path_lookup(const char *cmd, char **path, int exe_only) +{ + char *prog = NULL; + int len = strlen(cmd); + int isexe = len >= 4 && !strcasecmp(cmd+len-4, ".exe"); + + if (strchr(cmd, '/') || strchr(cmd, '\\')) + prog = xstrdup(cmd); + + while (!prog && *path) + prog = lookup_prog(*path++, cmd, isexe, exe_only); + + return prog; +} + +static int do_putenv(char **env, const char *name, int size, int free_old); + +/* used number of elements of environ array, including terminating NULL */ +static int environ_size = 0; +/* allocated size of environ array, in bytes */ +static int environ_alloc = 0; + +/* + * Create environment block suitable for CreateProcess. Merges current + * process environment and the supplied environment changes. 
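/*
 * [Editor's note] Layout reminder, not part of the patch: the block
 * handed to CreateProcessW is one contiguous buffer of NUL-terminated
 * wide "NAME=value" strings with an extra NUL at the very end, e.g.
 *
 *   L"HOME=C:\\Users\\me\0PATH=C:\\bin\0\0"
 */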
+ */ +static wchar_t *make_environment_block(char **deltaenv) +{ + wchar_t *wenvblk = NULL; + char **tmpenv; + int i = 0, size = environ_size, wenvsz = 0, wenvpos = 0; + + while (deltaenv && deltaenv[i]) + i++; + + /* copy the environment, leaving space for changes */ + tmpenv = xmalloc((size + i) * sizeof(char*)); + memcpy(tmpenv, environ, size * sizeof(char*)); + + /* merge supplied environment changes into the temporary environment */ + for (i = 0; deltaenv && deltaenv[i]; i++) + size = do_putenv(tmpenv, deltaenv[i], size, 0); + + /* create environment block from temporary environment */ + for (i = 0; tmpenv[i]; i++) { + size = 2 * strlen(tmpenv[i]) + 2; /* +2 for final \0 */ + ALLOC_GROW(wenvblk, (wenvpos + size) * sizeof(wchar_t), wenvsz); + wenvpos += xutftowcs(&wenvblk[wenvpos], tmpenv[i], size) + 1; + } + /* add final \0 terminator */ + wenvblk[wenvpos] = 0; + free(tmpenv); + return wenvblk; +} + +struct pinfo_t { + struct pinfo_t *next; + pid_t pid; + HANDLE proc; +}; +static struct pinfo_t *pinfo = NULL; +CRITICAL_SECTION pinfo_cs; + +static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaenv, + const char *dir, + int prepend_cmd, int fhin, int fhout, int fherr) +{ + STARTUPINFOW si; + PROCESS_INFORMATION pi; + struct strbuf args; + wchar_t wcmd[MAX_PATH], wdir[MAX_PATH], *wargs, *wenvblk = NULL; + unsigned flags = CREATE_UNICODE_ENVIRONMENT; + BOOL ret; + + /* Determine whether or not we are associated to a console */ + HANDLE cons = CreateFile("CONOUT$", GENERIC_WRITE, + FILE_SHARE_WRITE, NULL, OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, NULL); + if (cons == INVALID_HANDLE_VALUE) { + /* There is no console associated with this process. + * Since the child is a console process, Windows + * would normally create a console window. But + * since we'll be redirecting std streams, we do + * not need the console. + * It is necessary to use DETACHED_PROCESS + * instead of CREATE_NO_WINDOW to make ssh + * recognize that it has no console. + */ + flags |= DETACHED_PROCESS; + } else { + /* There is already a console. If we specified + * DETACHED_PROCESS here, too, Windows would + * disassociate the child from the console. + * The same is true for CREATE_NO_WINDOW. + * Go figure! + */ + CloseHandle(cons); + } + memset(&si, 0, sizeof(si)); + si.cb = sizeof(si); + si.dwFlags = STARTF_USESTDHANDLES; + si.hStdInput = winansi_get_osfhandle(fhin); + si.hStdOutput = winansi_get_osfhandle(fhout); + si.hStdError = winansi_get_osfhandle(fherr); + + if (xutftowcs_path(wcmd, cmd) < 0) + return -1; + if (dir && xutftowcs_path(wdir, dir) < 0) + return -1; + + /* concatenate argv, quoting args as we go */ + strbuf_init(&args, 0); + if (prepend_cmd) { + char *quoted = (char *)quote_arg(cmd); + strbuf_addstr(&args, quoted); + if (quoted != cmd) + free(quoted); + } + for (; *argv; argv++) { + char *quoted = (char *)quote_arg(*argv); + if (*args.buf) + strbuf_addch(&args, ' '); + strbuf_addstr(&args, quoted); + if (quoted != *argv) + free(quoted); + } + + wargs = xmalloc((2 * args.len + 1) * sizeof(wchar_t)); + xutftowcs(wargs, args.buf, 2 * args.len + 1); + strbuf_release(&args); + + wenvblk = make_environment_block(deltaenv); + + memset(&pi, 0, sizeof(pi)); + ret = CreateProcessW(wcmd, wargs, NULL, NULL, TRUE, flags, + wenvblk, dir ? 
wdir : NULL, &si, &pi); + + free(wenvblk); + free(wargs); + + if (!ret) { + errno = ENOENT; + return -1; + } + CloseHandle(pi.hThread); + + /* + * The process ID is the human-readable identifier of the process + * that we want to present in log and error messages. The handle + * is not useful for this purpose. But we cannot close it, either, + * because it is not possible to turn a process ID into a process + * handle after the process terminated. + * Keep the handle in a list for waitpid. + */ + EnterCriticalSection(&pinfo_cs); + { + struct pinfo_t *info = xmalloc(sizeof(struct pinfo_t)); + info->pid = pi.dwProcessId; + info->proc = pi.hProcess; + info->next = pinfo; + pinfo = info; + } + LeaveCriticalSection(&pinfo_cs); + + return (pid_t)pi.dwProcessId; +} + +static pid_t mingw_spawnv(const char *cmd, const char **argv, int prepend_cmd) +{ + return mingw_spawnve_fd(cmd, argv, NULL, NULL, prepend_cmd, 0, 1, 2); +} + +pid_t mingw_spawnvpe(const char *cmd, const char **argv, char **deltaenv, + const char *dir, + int fhin, int fhout, int fherr) +{ + pid_t pid; + char **path = get_path_split(); + char *prog = path_lookup(cmd, path, 0); + + if (!prog) { + errno = ENOENT; + pid = -1; + } + else { + const char *interpr = parse_interpreter(prog); + + if (interpr) { + const char *argv0 = argv[0]; + char *iprog = path_lookup(interpr, path, 1); + argv[0] = prog; + if (!iprog) { + errno = ENOENT; + pid = -1; + } + else { + pid = mingw_spawnve_fd(iprog, argv, deltaenv, dir, 1, + fhin, fhout, fherr); + free(iprog); + } + argv[0] = argv0; + } + else + pid = mingw_spawnve_fd(prog, argv, deltaenv, dir, 0, + fhin, fhout, fherr); + free(prog); + } + free_path_split(path); + return pid; +} + +static int try_shell_exec(const char *cmd, char *const *argv) +{ + const char *interpr = parse_interpreter(cmd); + char **path; + char *prog; + int pid = 0; + + if (!interpr) + return 0; + path = get_path_split(); + prog = path_lookup(interpr, path, 1); + if (prog) { + int argc = 0; + const char **argv2; + while (argv[argc]) argc++; + argv2 = xmalloc(sizeof(*argv) * (argc+1)); + argv2[0] = (char *)cmd; /* full path to the script file */ + memcpy(&argv2[1], &argv[1], sizeof(*argv) * argc); + pid = mingw_spawnv(prog, argv2, 1); + if (pid >= 0) { + int status; + if (waitpid(pid, &status, 0) < 0) + status = 255; + exit(status); + } + pid = 1; /* indicate that we tried but failed */ + free(prog); + free(argv2); + } + free_path_split(path); + return pid; +} + +int mingw_execv(const char *cmd, char *const *argv) +{ + /* check if git_command is a shell script */ + if (!try_shell_exec(cmd, argv)) { + int pid, status; + + pid = mingw_spawnv(cmd, (const char **)argv, 0); + if (pid < 0) + return -1; + if (waitpid(pid, &status, 0) < 0) + status = 255; + exit(status); + } + return -1; +} + +int mingw_execvp(const char *cmd, char *const *argv) +{ + char **path = get_path_split(); + char *prog = path_lookup(cmd, path, 0); + + if (prog) { + mingw_execv(prog, argv); + free(prog); + } else + errno = ENOENT; + + free_path_split(path); + return -1; +} + +int mingw_kill(pid_t pid, int sig) +{ + if (pid > 0 && sig == SIGTERM) { + HANDLE h = OpenProcess(PROCESS_TERMINATE, FALSE, pid); + + if (TerminateProcess(h, -1)) { + CloseHandle(h); + return 0; + } + + errno = err_win_to_posix(GetLastError()); + CloseHandle(h); + return -1; + } else if (pid > 0 && sig == 0) { + HANDLE h = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pid); + if (h) { + CloseHandle(h); + return 0; + } + } + + errno = EINVAL; + return -1; +} + +/* + * Compare environment 
entries by key (i.e. stopping at '=' or '\0'). + */ +static int compareenv(const void *v1, const void *v2) +{ + const char *e1 = *(const char**)v1; + const char *e2 = *(const char**)v2; + + for (;;) { + int c1 = *e1++; + int c2 = *e2++; + c1 = (c1 == '=') ? 0 : tolower(c1); + c2 = (c2 == '=') ? 0 : tolower(c2); + if (c1 > c2) + return 1; + if (c1 < c2) + return -1; + if (c1 == 0) + return 0; + } +} + +static int bsearchenv(char **env, const char *name, size_t size) +{ + unsigned low = 0, high = size; + while (low < high) { + unsigned mid = low + ((high - low) >> 1); + int cmp = compareenv(&env[mid], &name); + if (cmp < 0) + low = mid + 1; + else if (cmp > 0) + high = mid; + else + return mid; + } + return ~low; /* not found, return 1's complement of insert position */ +} + +/* + * If name contains '=', then sets the variable, otherwise it unsets it + * Size includes the terminating NULL. Env must have room for size + 1 entries + * (in case of insert). Returns the new size. Optionally frees removed entries. + */ +static int do_putenv(char **env, const char *name, int size, int free_old) +{ + int i = bsearchenv(env, name, size - 1); + + /* optionally free removed / replaced entry */ + if (i >= 0 && free_old) + free(env[i]); + + if (strchr(name, '=')) { + /* if new value ('key=value') is specified, insert or replace entry */ + if (i < 0) { + i = ~i; + memmove(&env[i + 1], &env[i], (size - i) * sizeof(char*)); + size++; + } + env[i] = (char*) name; + } else if (i >= 0) { + /* otherwise ('key') remove existing entry */ + size--; + memmove(&env[i], &env[i + 1], (size - i) * sizeof(char*)); + } + return size; +} + +char *mingw_getenv(const char *name) +{ + char *value; + int pos = bsearchenv(environ, name, environ_size - 1); + if (pos < 0) + return NULL; + value = strchr(environ[pos], '='); + return value ? &value[1] : NULL; +} + +int mingw_putenv(const char *namevalue) +{ + ALLOC_GROW(environ, (environ_size + 1) * sizeof(char*), environ_alloc); + environ_size = do_putenv(environ, namevalue, environ_size, 1); + return 0; +} + +/* + * Note, this isn't a complete replacement for getaddrinfo. It assumes + * that service contains a numerical port, or that it is null. It + * does a simple search using gethostbyname, and returns one IPv4 host + * if one was found. + */ +static int WSAAPI getaddrinfo_stub(const char *node, const char *service, + const struct addrinfo *hints, + struct addrinfo **res) +{ + struct hostent *h = NULL; + struct addrinfo *ai; + struct sockaddr_in *sin; + + if (node) { + h = gethostbyname(node); + if (!h) + return WSAGetLastError(); + } + + ai = xmalloc(sizeof(struct addrinfo)); + *res = ai; + ai->ai_flags = 0; + ai->ai_family = AF_INET; + ai->ai_socktype = hints ? hints->ai_socktype : 0; + switch (ai->ai_socktype) { + case SOCK_STREAM: + ai->ai_protocol = IPPROTO_TCP; + break; + case SOCK_DGRAM: + ai->ai_protocol = IPPROTO_UDP; + break; + default: + ai->ai_protocol = 0; + break; + } + ai->ai_addrlen = sizeof(struct sockaddr_in); + if (hints && (hints->ai_flags & AI_CANONNAME)) + ai->ai_canonname = h ? xstrdup(h->h_name) : NULL; + else + ai->ai_canonname = NULL; + + sin = xcalloc(1, ai->ai_addrlen); + sin->sin_family = AF_INET; + /* Note: getaddrinfo is supposed to allow service to be a string, + * which should be looked up using getservbyname. 
This is + * currently not implemented */ + if (service) + sin->sin_port = htons(atoi(service)); + if (h) + sin->sin_addr = *(struct in_addr *)h->h_addr; + else if (hints && (hints->ai_flags & AI_PASSIVE)) + sin->sin_addr.s_addr = INADDR_ANY; + else + sin->sin_addr.s_addr = INADDR_LOOPBACK; + ai->ai_addr = (struct sockaddr *)sin; + ai->ai_next = NULL; + return 0; +} + +static void WSAAPI freeaddrinfo_stub(struct addrinfo *res) +{ + free(res->ai_canonname); + free(res->ai_addr); + free(res); +} + +static int WSAAPI getnameinfo_stub(const struct sockaddr *sa, socklen_t salen, + char *host, DWORD hostlen, + char *serv, DWORD servlen, int flags) +{ + const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; + if (sa->sa_family != AF_INET) + return EAI_FAMILY; + if (!host && !serv) + return EAI_NONAME; + + if (host && hostlen > 0) { + struct hostent *ent = NULL; + if (!(flags & NI_NUMERICHOST)) + ent = gethostbyaddr((const char *)&sin->sin_addr, + sizeof(sin->sin_addr), AF_INET); + + if (ent) + snprintf(host, hostlen, "%s", ent->h_name); + else if (flags & NI_NAMEREQD) + return EAI_NONAME; + else + snprintf(host, hostlen, "%s", inet_ntoa(sin->sin_addr)); + } + + if (serv && servlen > 0) { + struct servent *ent = NULL; + if (!(flags & NI_NUMERICSERV)) + ent = getservbyport(sin->sin_port, + flags & NI_DGRAM ? "udp" : "tcp"); + + if (ent) + snprintf(serv, servlen, "%s", ent->s_name); + else + snprintf(serv, servlen, "%d", ntohs(sin->sin_port)); + } + + return 0; +} + +static HMODULE ipv6_dll = NULL; +static void (WSAAPI *ipv6_freeaddrinfo)(struct addrinfo *res); +static int (WSAAPI *ipv6_getaddrinfo)(const char *node, const char *service, + const struct addrinfo *hints, + struct addrinfo **res); +static int (WSAAPI *ipv6_getnameinfo)(const struct sockaddr *sa, socklen_t salen, + char *host, DWORD hostlen, + char *serv, DWORD servlen, int flags); +/* + * gai_strerror is an inline function in the ws2tcpip.h header, so we + * don't need to try to load that one dynamically. 
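+ *
+ * Callers never pick an implementation themselves: the getaddrinfo,
+ * getnameinfo and freeaddrinfo macros in mingw.h route through the
+ * mingw_* wrappers below, which lazily bind either the real entry
+ * points from ws2_32.dll / wship6.dll or the IPv4-only stubs above.
+ * Usage is therefore plain POSIX style, e.g. (illustrative only):
+ *
+ *	struct addrinfo hints = { 0 }, *ai;
+ *	hints.ai_socktype = SOCK_STREAM;
+ *	if (!getaddrinfo("example.com", "80", &hints, &ai)) {
+ *		... connect using ai->ai_addr, ai->ai_addrlen ...
+ *		freeaddrinfo(ai);
+ *	}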
+ */ + +static void socket_cleanup(void) +{ + WSACleanup(); + if (ipv6_dll) + FreeLibrary(ipv6_dll); + ipv6_dll = NULL; + ipv6_freeaddrinfo = freeaddrinfo_stub; + ipv6_getaddrinfo = getaddrinfo_stub; + ipv6_getnameinfo = getnameinfo_stub; +} + +static void ensure_socket_initialization(void) +{ + WSADATA wsa; + static int initialized = 0; + const char *libraries[] = { "ws2_32.dll", "wship6.dll", NULL }; + const char **name; + + if (initialized) + return; + + if (WSAStartup(MAKEWORD(2,2), &wsa)) + die("unable to initialize winsock subsystem, error %d", + WSAGetLastError()); + + for (name = libraries; *name; name++) { + ipv6_dll = LoadLibrary(*name); + if (!ipv6_dll) + continue; + + ipv6_freeaddrinfo = (void (WSAAPI *)(struct addrinfo *)) + GetProcAddress(ipv6_dll, "freeaddrinfo"); + ipv6_getaddrinfo = (int (WSAAPI *)(const char *, const char *, + const struct addrinfo *, + struct addrinfo **)) + GetProcAddress(ipv6_dll, "getaddrinfo"); + ipv6_getnameinfo = (int (WSAAPI *)(const struct sockaddr *, + socklen_t, char *, DWORD, + char *, DWORD, int)) + GetProcAddress(ipv6_dll, "getnameinfo"); + if (!ipv6_freeaddrinfo || !ipv6_getaddrinfo || !ipv6_getnameinfo) { + FreeLibrary(ipv6_dll); + ipv6_dll = NULL; + } else + break; + } + if (!ipv6_freeaddrinfo || !ipv6_getaddrinfo || !ipv6_getnameinfo) { + ipv6_freeaddrinfo = freeaddrinfo_stub; + ipv6_getaddrinfo = getaddrinfo_stub; + ipv6_getnameinfo = getnameinfo_stub; + } + + atexit(socket_cleanup); + initialized = 1; +} + +#undef gethostname +int mingw_gethostname(char *name, int namelen) +{ + ensure_socket_initialization(); + return gethostname(name, namelen); +} + +#undef gethostbyname +struct hostent *mingw_gethostbyname(const char *host) +{ + ensure_socket_initialization(); + return gethostbyname(host); +} + +void mingw_freeaddrinfo(struct addrinfo *res) +{ + ipv6_freeaddrinfo(res); +} + +int mingw_getaddrinfo(const char *node, const char *service, + const struct addrinfo *hints, struct addrinfo **res) +{ + ensure_socket_initialization(); + return ipv6_getaddrinfo(node, service, hints, res); +} + +int mingw_getnameinfo(const struct sockaddr *sa, socklen_t salen, + char *host, DWORD hostlen, char *serv, DWORD servlen, + int flags) +{ + ensure_socket_initialization(); + return ipv6_getnameinfo(sa, salen, host, hostlen, serv, servlen, flags); +} + +int mingw_socket(int domain, int type, int protocol) +{ + int sockfd; + SOCKET s; + + ensure_socket_initialization(); + s = WSASocket(domain, type, protocol, NULL, 0, 0); + if (s == INVALID_SOCKET) { + /* + * WSAGetLastError() values are regular BSD error codes + * biased by WSABASEERR. + * However, strerror() does not know about networking + * specific errors, which are values beginning at 38 or so. + * Therefore, we choose to leave the biased error code + * in errno so that _if_ someone looks up the code somewhere, + * then it is at least the number that are usually listed. 
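+	 * For example, a refused connection surfaces as WSAECONNREFUSED
+	 * (10061, i.e. WSABASEERR + 61) rather than ECONNREFUSED.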
+ */ + errno = WSAGetLastError(); + return -1; + } + /* convert into a file descriptor */ + if ((sockfd = _open_osfhandle(s, O_RDWR|O_BINARY)) < 0) { + closesocket(s); + return error("unable to make a socket file descriptor: %s", + strerror(errno)); + } + return sockfd; +} + +#undef connect +int mingw_connect(int sockfd, struct sockaddr *sa, size_t sz) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return connect(s, sa, sz); +} + +#undef bind +int mingw_bind(int sockfd, struct sockaddr *sa, size_t sz) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return bind(s, sa, sz); +} + +#undef setsockopt +int mingw_setsockopt(int sockfd, int lvl, int optname, void *optval, int optlen) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return setsockopt(s, lvl, optname, (const char*)optval, optlen); +} + +#undef shutdown +int mingw_shutdown(int sockfd, int how) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return shutdown(s, how); +} + +#undef listen +int mingw_listen(int sockfd, int backlog) +{ + SOCKET s = (SOCKET)_get_osfhandle(sockfd); + return listen(s, backlog); +} + +#undef accept +int mingw_accept(int sockfd1, struct sockaddr *sa, socklen_t *sz) +{ + int sockfd2; + + SOCKET s1 = (SOCKET)_get_osfhandle(sockfd1); + SOCKET s2 = accept(s1, sa, sz); + + /* convert into a file descriptor */ + if ((sockfd2 = _open_osfhandle(s2, O_RDWR|O_BINARY)) < 0) { + int err = errno; + closesocket(s2); + return error("unable to make a socket file descriptor: %s", + strerror(err)); + } + return sockfd2; +} + +#undef rename +int mingw_rename(const char *pold, const char *pnew) +{ + DWORD attrs, gle; + int tries = 0; + wchar_t wpold[MAX_PATH], wpnew[MAX_PATH]; + if (xutftowcs_path(wpold, pold) < 0 || xutftowcs_path(wpnew, pnew) < 0) + return -1; + + /* + * Try native rename() first to get errno right. + * It is based on MoveFile(), which cannot overwrite existing files. + */ + if (!_wrename(wpold, wpnew)) + return 0; + if (errno != EEXIST) + return -1; +repeat: + if (MoveFileExW(wpold, wpnew, MOVEFILE_REPLACE_EXISTING)) + return 0; + /* TODO: translate more errors */ + gle = GetLastError(); + if (gle == ERROR_ACCESS_DENIED && + (attrs = GetFileAttributesW(wpnew)) != INVALID_FILE_ATTRIBUTES) { + if (attrs & FILE_ATTRIBUTE_DIRECTORY) { + errno = EISDIR; + return -1; + } + if ((attrs & FILE_ATTRIBUTE_READONLY) && + SetFileAttributesW(wpnew, attrs & ~FILE_ATTRIBUTE_READONLY)) { + if (MoveFileExW(wpold, wpnew, MOVEFILE_REPLACE_EXISTING)) + return 0; + gle = GetLastError(); + /* revert file attributes on failure */ + SetFileAttributesW(wpnew, attrs); + } + } + if (tries < ARRAY_SIZE(delay) && gle == ERROR_ACCESS_DENIED) { + /* + * We assume that some other process had the source or + * destination file open at the wrong moment and retry. + * In order to give the other process a higher chance to + * complete its operation, we give up our time slice now. + * If we have to retry again, we do sleep a bit. + */ + Sleep(delay[tries]); + tries++; + goto repeat; + } + if (gle == ERROR_ACCESS_DENIED && + ask_yes_no_if_possible("Rename from '%s' to '%s' failed. " + "Should I try again?", pold, pnew)) + goto repeat; + + errno = EACCES; + return -1; +} + +/* + * Note that this doesn't return the actual pagesize, but + * the allocation granularity. If future Windows specific git code + * needs the real getpagesize function, we need to find another solution. 
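+ * (On typical Windows systems the allocation granularity is 64 KiB
+ * while the real page size, dwPageSize, is 4 KiB; the Win32 mmap
+ * emulation needs the former, because MapViewOfFile() offsets must be
+ * aligned to the allocation granularity.)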
+ */ +int mingw_getpagesize(void) +{ + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwAllocationGranularity; +} + +struct passwd *getpwuid(int uid) +{ + static char user_name[100]; + static struct passwd p; + + DWORD len = sizeof(user_name); + if (!GetUserName(user_name, &len)) + return NULL; + p.pw_name = user_name; + p.pw_gecos = "unknown"; + p.pw_dir = NULL; + return &p; +} + +static HANDLE timer_event; +static HANDLE timer_thread; +static int timer_interval; +static int one_shot; +static sig_handler_t timer_fn = SIG_DFL, sigint_fn = SIG_DFL; + +/* The timer works like this: + * The thread, ticktack(), is a trivial routine that most of the time + * only waits to receive the signal to terminate. The main thread tells + * the thread to terminate by setting the timer_event to the signalled + * state. + * But ticktack() interrupts the wait state after the timer's interval + * length to call the signal handler. + */ + +static unsigned __stdcall ticktack(void *dummy) +{ + while (WaitForSingleObject(timer_event, timer_interval) == WAIT_TIMEOUT) { + mingw_raise(SIGALRM); + if (one_shot) + break; + } + return 0; +} + +static int start_timer_thread(void) +{ + timer_event = CreateEvent(NULL, FALSE, FALSE, NULL); + if (timer_event) { + timer_thread = (HANDLE) _beginthreadex(NULL, 0, ticktack, NULL, 0, NULL); + if (!timer_thread ) + return errno = ENOMEM, + error("cannot start timer thread"); + } else + return errno = ENOMEM, + error("cannot allocate resources for timer"); + return 0; +} + +static void stop_timer_thread(void) +{ + if (timer_event) + SetEvent(timer_event); /* tell thread to terminate */ + if (timer_thread) { + int rc = WaitForSingleObject(timer_thread, 1000); + if (rc == WAIT_TIMEOUT) + error("timer thread did not terminate timely"); + else if (rc != WAIT_OBJECT_0) + error("waiting for timer thread failed: %lu", + GetLastError()); + CloseHandle(timer_thread); + } + if (timer_event) + CloseHandle(timer_event); + timer_event = NULL; + timer_thread = NULL; +} + +static inline int is_timeval_eq(const struct timeval *i1, const struct timeval *i2) +{ + return i1->tv_sec == i2->tv_sec && i1->tv_usec == i2->tv_usec; +} + +int setitimer(int type, struct itimerval *in, struct itimerval *out) +{ + static const struct timeval zero; + static int atexit_done; + + if (out != NULL) + return errno = EINVAL, + error("setitimer param 3 != NULL not implemented"); + if (!is_timeval_eq(&in->it_interval, &zero) && + !is_timeval_eq(&in->it_interval, &in->it_value)) + return errno = EINVAL, + error("setitimer: it_interval must be zero or eq it_value"); + + if (timer_thread) + stop_timer_thread(); + + if (is_timeval_eq(&in->it_value, &zero) && + is_timeval_eq(&in->it_interval, &zero)) + return 0; + + timer_interval = in->it_value.tv_sec * 1000 + in->it_value.tv_usec / 1000; + one_shot = is_timeval_eq(&in->it_interval, &zero); + if (!atexit_done) { + atexit(stop_timer_thread); + atexit_done = 1; + } + return start_timer_thread(); +} + +int sigaction(int sig, struct sigaction *in, struct sigaction *out) +{ + if (sig != SIGALRM) + return errno = EINVAL, + error("sigaction only implemented for SIGALRM"); + if (out != NULL) + return errno = EINVAL, + error("sigaction: param 3 != NULL not implemented"); + + timer_fn = in->sa_handler; + return 0; +} + +#undef signal +sig_handler_t mingw_signal(int sig, sig_handler_t handler) +{ + sig_handler_t old; + + switch (sig) { + case SIGALRM: + old = timer_fn; + timer_fn = handler; + break; + + case SIGINT: + old = sigint_fn; + sigint_fn = handler; + break; + + default: 
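+		/* not emulated here; defer to the C runtime */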
+ return signal(sig, handler); + } + + return old; +} + +#undef raise +int mingw_raise(int sig) +{ + switch (sig) { + case SIGALRM: + if (timer_fn == SIG_DFL) { + if (isatty(STDERR_FILENO)) + fputs("Alarm clock\n", stderr); + exit(128 + SIGALRM); + } else if (timer_fn != SIG_IGN) + timer_fn(SIGALRM); + return 0; + + case SIGINT: + if (sigint_fn == SIG_DFL) + exit(128 + SIGINT); + else if (sigint_fn != SIG_IGN) + sigint_fn(SIGINT); + return 0; + + default: + return raise(sig); + } +} + + +static const char *make_backslash_path(const char *path) +{ + static char buf[PATH_MAX + 1]; + char *c; + + if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX) + die("Too long path: %.*s", 60, path); + + for (c = buf; *c; c++) { + if (*c == '/') + *c = '\\'; + } + return buf; +} + +void mingw_open_html(const char *unixpath) +{ + const char *htmlpath = make_backslash_path(unixpath); + typedef HINSTANCE (WINAPI *T)(HWND, const char *, + const char *, const char *, const char *, INT); + T ShellExecute; + HMODULE shell32; + int r; + + shell32 = LoadLibrary("shell32.dll"); + if (!shell32) + die("cannot load shell32.dll"); + ShellExecute = (T)GetProcAddress(shell32, "ShellExecuteA"); + if (!ShellExecute) + die("cannot run browser"); + + printf("Launching default browser to display HTML ...\n"); + r = (int)ShellExecute(NULL, "open", htmlpath, NULL, "\\", SW_SHOWNORMAL); + FreeLibrary(shell32); + /* see the MSDN documentation referring to the result codes here */ + if (r <= 32) { + die("failed to launch browser for %.*s", MAX_PATH, unixpath); + } +} + +int link(const char *oldpath, const char *newpath) +{ + typedef BOOL (WINAPI *T)(LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES); + static T create_hard_link = NULL; + wchar_t woldpath[MAX_PATH], wnewpath[MAX_PATH]; + if (xutftowcs_path(woldpath, oldpath) < 0 || + xutftowcs_path(wnewpath, newpath) < 0) + return -1; + + if (!create_hard_link) { + create_hard_link = (T) GetProcAddress( + GetModuleHandle("kernel32.dll"), "CreateHardLinkW"); + if (!create_hard_link) + create_hard_link = (T)-1; + } + if (create_hard_link == (T)-1) { + errno = ENOSYS; + return -1; + } + if (!create_hard_link(wnewpath, woldpath, NULL)) { + errno = err_win_to_posix(GetLastError()); + return -1; + } + return 0; +} + +pid_t waitpid(pid_t pid, int *status, int options) +{ + HANDLE h = OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION, + FALSE, pid); + if (!h) { + errno = ECHILD; + return -1; + } + + if (pid > 0 && options & WNOHANG) { + if (WAIT_OBJECT_0 != WaitForSingleObject(h, 0)) { + CloseHandle(h); + return 0; + } + options &= ~WNOHANG; + } + + if (options == 0) { + struct pinfo_t **ppinfo; + if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) { + CloseHandle(h); + return 0; + } + + if (status) + GetExitCodeProcess(h, (LPDWORD)status); + + EnterCriticalSection(&pinfo_cs); + + ppinfo = &pinfo; + while (*ppinfo) { + struct pinfo_t *info = *ppinfo; + if (info->pid == pid) { + CloseHandle(info->proc); + *ppinfo = info->next; + free(info); + break; + } + ppinfo = &info->next; + } + + LeaveCriticalSection(&pinfo_cs); + + CloseHandle(h); + return pid; + } + CloseHandle(h); + + errno = EINVAL; + return -1; +} + +int mingw_offset_1st_component(const char *path) +{ + int offset = 0; + if (has_dos_drive_prefix(path)) + offset = 2; + + /* unc paths */ + else if (is_dir_sep(path[0]) && is_dir_sep(path[1])) { + + /* skip server name */ + char *pos = strpbrk(path + 2, "\\/"); + if (!pos) + return 0; /* Error: malformed unc path */ + + do { + pos++; + } while (*pos && !is_dir_sep(*pos)); + + offset = pos - path; 
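+		/* offset now spans the whole "//server/share" prefix */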
+ } + + return offset + is_dir_sep(path[offset]); +} + +int xutftowcsn(wchar_t *wcs, const char *utfs, size_t wcslen, int utflen) +{ + int upos = 0, wpos = 0; + const unsigned char *utf = (const unsigned char*) utfs; + if (!utf || !wcs || wcslen < 1) { + errno = EINVAL; + return -1; + } + /* reserve space for \0 */ + wcslen--; + if (utflen < 0) + utflen = INT_MAX; + + while (upos < utflen) { + int c = utf[upos++] & 0xff; + if (utflen == INT_MAX && c == 0) + break; + + if (wpos >= wcslen) { + wcs[wpos] = 0; + errno = ERANGE; + return -1; + } + + if (c < 0x80) { + /* ASCII */ + wcs[wpos++] = c; + } else if (c >= 0xc2 && c < 0xe0 && upos < utflen && + (utf[upos] & 0xc0) == 0x80) { + /* 2-byte utf-8 */ + c = ((c & 0x1f) << 6); + c |= (utf[upos++] & 0x3f); + wcs[wpos++] = c; + } else if (c >= 0xe0 && c < 0xf0 && upos + 1 < utflen && + !(c == 0xe0 && utf[upos] < 0xa0) && /* over-long encoding */ + (utf[upos] & 0xc0) == 0x80 && + (utf[upos + 1] & 0xc0) == 0x80) { + /* 3-byte utf-8 */ + c = ((c & 0x0f) << 12); + c |= ((utf[upos++] & 0x3f) << 6); + c |= (utf[upos++] & 0x3f); + wcs[wpos++] = c; + } else if (c >= 0xf0 && c < 0xf5 && upos + 2 < utflen && + wpos + 1 < wcslen && + !(c == 0xf0 && utf[upos] < 0x90) && /* over-long encoding */ + !(c == 0xf4 && utf[upos] >= 0x90) && /* > \u10ffff */ + (utf[upos] & 0xc0) == 0x80 && + (utf[upos + 1] & 0xc0) == 0x80 && + (utf[upos + 2] & 0xc0) == 0x80) { + /* 4-byte utf-8: convert to \ud8xx \udcxx surrogate pair */ + c = ((c & 0x07) << 18); + c |= ((utf[upos++] & 0x3f) << 12); + c |= ((utf[upos++] & 0x3f) << 6); + c |= (utf[upos++] & 0x3f); + c -= 0x10000; + wcs[wpos++] = 0xd800 | (c >> 10); + wcs[wpos++] = 0xdc00 | (c & 0x3ff); + } else if (c >= 0xa0) { + /* invalid utf-8 byte, printable unicode char: convert 1:1 */ + wcs[wpos++] = c; + } else { + /* invalid utf-8 byte, non-printable unicode: convert to hex */ + static const char *hex = "0123456789abcdef"; + wcs[wpos++] = hex[c >> 4]; + if (wpos < wcslen) + wcs[wpos++] = hex[c & 0x0f]; + } + } + wcs[wpos] = 0; + return wpos; +} + +int xwcstoutf(char *utf, const wchar_t *wcs, size_t utflen) +{ + if (!wcs || !utf || utflen < 1) { + errno = EINVAL; + return -1; + } + utflen = WideCharToMultiByte(CP_UTF8, 0, wcs, -1, utf, utflen, NULL, NULL); + if (utflen) + return utflen - 1; + errno = ERANGE; + return -1; +} + +/* + * Disable MSVCRT command line wildcard expansion (__getmainargs called from + * mingw startup code, see init.c in mingw runtime). 
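+ * Git must see wildcard arguments such as "*.c" verbatim, so that its
+ * own pathspec machinery can perform the expansion.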
+ */ +int _CRT_glob = 0; + +typedef struct { + int newmode; +} _startupinfo; + +extern int __wgetmainargs(int *argc, wchar_t ***argv, wchar_t ***env, int glob, + _startupinfo *si); + +static NORETURN void die_startup() +{ + fputs("fatal: not enough memory for initialization", stderr); + exit(128); +} + +static void *malloc_startup(size_t size) +{ + void *result = malloc(size); + if (!result) + die_startup(); + return result; +} + +static char *wcstoutfdup_startup(char *buffer, const wchar_t *wcs, size_t len) +{ + len = xwcstoutf(buffer, wcs, len) + 1; + return memcpy(malloc_startup(len), buffer, len); +} + +void mingw_startup() +{ + int i, maxlen, argc; + char *buffer; + wchar_t **wenv, **wargv; + _startupinfo si; + + /* get wide char arguments and environment */ + si.newmode = 0; + if (__wgetmainargs(&argc, &wargv, &wenv, _CRT_glob, &si) < 0) + die_startup(); + + /* determine size of argv and environ conversion buffer */ + maxlen = wcslen(_wpgmptr); + for (i = 1; i < argc; i++) + maxlen = max(maxlen, wcslen(wargv[i])); + for (i = 0; wenv[i]; i++) + maxlen = max(maxlen, wcslen(wenv[i])); + + /* + * nedmalloc can't free CRT memory, allocate resizable environment + * list. Note that xmalloc / xmemdupz etc. call getenv, so we cannot + * use it while initializing the environment itself. + */ + environ_size = i + 1; + environ_alloc = alloc_nr(environ_size * sizeof(char*)); + environ = malloc_startup(environ_alloc); + + /* allocate buffer (wchar_t encodes to max 3 UTF-8 bytes) */ + maxlen = 3 * maxlen + 1; + buffer = malloc_startup(maxlen); + + /* convert command line arguments and environment to UTF-8 */ + __argv[0] = wcstoutfdup_startup(buffer, _wpgmptr, maxlen); + for (i = 1; i < argc; i++) + __argv[i] = wcstoutfdup_startup(buffer, wargv[i], maxlen); + for (i = 0; wenv[i]; i++) + environ[i] = wcstoutfdup_startup(buffer, wenv[i], maxlen); + environ[i] = NULL; + free(buffer); + + /* sort environment for O(log n) getenv / putenv */ + qsort(environ, i, sizeof(char*), compareenv); + + /* fix Windows specific environment settings */ + + /* on Windows it is TMP and TEMP */ + if (!mingw_getenv("TMPDIR")) { + const char *tmp = mingw_getenv("TMP"); + if (!tmp) + tmp = mingw_getenv("TEMP"); + if (tmp) + setenv("TMPDIR", tmp, 1); + } + + /* simulate TERM to enable auto-color (see color.c) */ + if (!getenv("TERM")) + setenv("TERM", "cygwin", 1); + + /* initialize critical section for waitpid pinfo_t list */ + InitializeCriticalSection(&pinfo_cs); + + /* set up default file mode and file modes for stdin/out/err */ + _fmode = _O_BINARY; + _setmode(_fileno(stdin), _O_BINARY); + _setmode(_fileno(stdout), _O_BINARY); + _setmode(_fileno(stderr), _O_BINARY); + + /* initialize Unicode console */ + winansi_init(); +} diff --git a/compat/mingw.h b/compat/mingw.h new file mode 100644 index 0000000000..df0e3203ab --- /dev/null +++ b/compat/mingw.h @@ -0,0 +1,494 @@ +#include <winsock2.h> +#include <ws2tcpip.h> + +/* + * things that are not available in header files + */ + +typedef int pid_t; +typedef int uid_t; +typedef int socklen_t; +#define hstrerror strerror + +#define S_IFLNK 0120000 /* Symbolic link */ +#define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK) +#define S_ISSOCK(x) 0 + +#define S_IRGRP 0 +#define S_IWGRP 0 +#define S_IXGRP 0 +#define S_IRWXG (S_IRGRP | S_IWGRP | S_IXGRP) +#define S_IROTH 0 +#define S_IWOTH 0 +#define S_IXOTH 0 +#define S_IRWXO (S_IROTH | S_IWOTH | S_IXOTH) + +#define S_ISUID 0004000 +#define S_ISGID 0002000 +#define S_ISVTX 0001000 + +#define WIFEXITED(x) 1 +#define WIFSIGNALED(x) 0 
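+/*
+ * Windows has no signal-terminated children: a wait status is always a
+ * plain exit code, hence WIFEXITED() is constant true and WIFSIGNALED()
+ * constant false.
+ */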
+#define WEXITSTATUS(x) ((x) & 0xff) +#define WTERMSIG(x) SIGTERM + +#ifndef EWOULDBLOCK +#define EWOULDBLOCK EAGAIN +#endif +#ifndef ELOOP +#define ELOOP EMLINK +#endif +#define SHUT_WR SD_SEND + +#define SIGHUP 1 +#define SIGQUIT 3 +#define SIGKILL 9 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGCHLD 17 + +#define F_GETFD 1 +#define F_SETFD 2 +#define FD_CLOEXEC 0x1 + +#ifndef EAFNOSUPPORT +#define EAFNOSUPPORT WSAEAFNOSUPPORT +#endif +#ifndef ECONNABORTED +#define ECONNABORTED WSAECONNABORTED +#endif + +struct passwd { + char *pw_name; + char *pw_gecos; + char *pw_dir; +}; + +typedef void (__cdecl *sig_handler_t)(int); +struct sigaction { + sig_handler_t sa_handler; + unsigned sa_flags; +}; +#define sigemptyset(x) (void)0 +#define SA_RESTART 0 + +struct itimerval { + struct timeval it_value, it_interval; +}; +#define ITIMER_REAL 0 + +/* + * sanitize preprocessor namespace polluted by Windows headers defining + * macros which collide with git local versions + */ +#undef HELP_COMMAND /* from winuser.h */ + +/* + * trivial stubs + */ + +static inline int readlink(const char *path, char *buf, size_t bufsiz) +{ errno = ENOSYS; return -1; } +static inline int symlink(const char *oldpath, const char *newpath) +{ errno = ENOSYS; return -1; } +static inline int fchmod(int fildes, mode_t mode) +{ errno = ENOSYS; return -1; } +static inline pid_t fork(void) +{ errno = ENOSYS; return -1; } +static inline unsigned int alarm(unsigned int seconds) +{ return 0; } +static inline int fsync(int fd) +{ return _commit(fd); } +static inline pid_t getppid(void) +{ return 1; } +static inline void sync(void) +{} +static inline uid_t getuid(void) +{ return 1; } +static inline struct passwd *getpwnam(const char *name) +{ return NULL; } +static inline int fcntl(int fd, int cmd, ...) 
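+/* only the FD_CLOEXEC bookkeeping is pretended; everything else fails */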
+{ + if (cmd == F_GETFD || cmd == F_SETFD) + return 0; + errno = EINVAL; + return -1; +} +/* bash cannot reliably detect negative return codes as failure */ +#define exit(code) exit((code) & 0xff) + +/* + * simple adaptors + */ + +int mingw_mkdir(const char *path, int mode); +#define mkdir mingw_mkdir + +#define WNOHANG 1 +pid_t waitpid(pid_t pid, int *status, int options); + +#define kill mingw_kill +int mingw_kill(pid_t pid, int sig); + +#ifndef NO_OPENSSL +#include <openssl/ssl.h> +static inline int mingw_SSL_set_fd(SSL *ssl, int fd) +{ + return SSL_set_fd(ssl, _get_osfhandle(fd)); +} +#define SSL_set_fd mingw_SSL_set_fd + +static inline int mingw_SSL_set_rfd(SSL *ssl, int fd) +{ + return SSL_set_rfd(ssl, _get_osfhandle(fd)); +} +#define SSL_set_rfd mingw_SSL_set_rfd + +static inline int mingw_SSL_set_wfd(SSL *ssl, int fd) +{ + return SSL_set_wfd(ssl, _get_osfhandle(fd)); +} +#define SSL_set_wfd mingw_SSL_set_wfd +#endif + +/* + * implementations of missing functions + */ + +int pipe(int filedes[2]); +unsigned int sleep (unsigned int seconds); +int mkstemp(char *template); +int gettimeofday(struct timeval *tv, void *tz); +struct tm *gmtime_r(const time_t *timep, struct tm *result); +struct tm *localtime_r(const time_t *timep, struct tm *result); +int getpagesize(void); /* defined in MinGW's libgcc.a */ +struct passwd *getpwuid(uid_t uid); +int setitimer(int type, struct itimerval *in, struct itimerval *out); +int sigaction(int sig, struct sigaction *in, struct sigaction *out); +int link(const char *oldpath, const char *newpath); + +/* + * replacements of existing functions + */ + +int mingw_unlink(const char *pathname); +#define unlink mingw_unlink + +int mingw_rmdir(const char *path); +#define rmdir mingw_rmdir + +int mingw_open (const char *filename, int oflags, ...); +#define open mingw_open + +int mingw_fgetc(FILE *stream); +#define fgetc mingw_fgetc + +FILE *mingw_fopen (const char *filename, const char *otype); +#define fopen mingw_fopen + +FILE *mingw_freopen (const char *filename, const char *otype, FILE *stream); +#define freopen mingw_freopen + +int mingw_fflush(FILE *stream); +#define fflush mingw_fflush + +int mingw_access(const char *filename, int mode); +#undef access +#define access mingw_access + +int mingw_chdir(const char *dirname); +#define chdir mingw_chdir + +int mingw_chmod(const char *filename, int mode); +#define chmod mingw_chmod + +char *mingw_mktemp(char *template); +#define mktemp mingw_mktemp + +char *mingw_getcwd(char *pointer, int len); +#define getcwd mingw_getcwd + +char *mingw_getenv(const char *name); +#define getenv mingw_getenv +int mingw_putenv(const char *namevalue); +#define putenv mingw_putenv +#define unsetenv mingw_putenv + +int mingw_gethostname(char *host, int namelen); +#define gethostname mingw_gethostname + +struct hostent *mingw_gethostbyname(const char *host); +#define gethostbyname mingw_gethostbyname + +void mingw_freeaddrinfo(struct addrinfo *res); +#define freeaddrinfo mingw_freeaddrinfo + +int mingw_getaddrinfo(const char *node, const char *service, + const struct addrinfo *hints, struct addrinfo **res); +#define getaddrinfo mingw_getaddrinfo + +int mingw_getnameinfo(const struct sockaddr *sa, socklen_t salen, + char *host, DWORD hostlen, char *serv, DWORD servlen, + int flags); +#define getnameinfo mingw_getnameinfo + +int mingw_socket(int domain, int type, int protocol); +#define socket mingw_socket + +int mingw_connect(int sockfd, struct sockaddr *sa, size_t sz); +#define connect mingw_connect + +int mingw_bind(int sockfd, struct 
sockaddr *sa, size_t sz); +#define bind mingw_bind + +int mingw_setsockopt(int sockfd, int lvl, int optname, void *optval, int optlen); +#define setsockopt mingw_setsockopt + +int mingw_shutdown(int sockfd, int how); +#define shutdown mingw_shutdown + +int mingw_listen(int sockfd, int backlog); +#define listen mingw_listen + +int mingw_accept(int sockfd, struct sockaddr *sa, socklen_t *sz); +#define accept mingw_accept + +int mingw_rename(const char*, const char*); +#define rename mingw_rename + +#if defined(USE_WIN32_MMAP) || defined(_MSC_VER) +int mingw_getpagesize(void); +#define getpagesize mingw_getpagesize +#endif + +struct rlimit { + unsigned int rlim_cur; +}; +#define RLIMIT_NOFILE 0 + +static inline int getrlimit(int resource, struct rlimit *rlp) +{ + if (resource != RLIMIT_NOFILE) { + errno = EINVAL; + return -1; + } + + rlp->rlim_cur = 2048; + return 0; +} + +/* + * Use mingw specific stat()/lstat()/fstat() implementations on Windows. + */ +#define off_t off64_t +#define lseek _lseeki64 + +/* use struct stat with 64 bit st_size */ +#ifdef stat +#undef stat +#endif +#define stat _stati64 +int mingw_lstat(const char *file_name, struct stat *buf); +int mingw_stat(const char *file_name, struct stat *buf); +int mingw_fstat(int fd, struct stat *buf); +#ifdef fstat +#undef fstat +#endif +#define fstat mingw_fstat +#ifdef lstat +#undef lstat +#endif +#define lstat mingw_lstat + +#ifndef _stati64 +# define _stati64(x,y) mingw_stat(x,y) +#elif defined (_USE_32BIT_TIME_T) +# define _stat32i64(x,y) mingw_stat(x,y) +#else +# define _stat64(x,y) mingw_stat(x,y) +#endif + +int mingw_utime(const char *file_name, const struct utimbuf *times); +#define utime mingw_utime + +pid_t mingw_spawnvpe(const char *cmd, const char **argv, char **env, + const char *dir, + int fhin, int fhout, int fherr); +int mingw_execvp(const char *cmd, char *const *argv); +#define execvp mingw_execvp +int mingw_execv(const char *cmd, char *const *argv); +#define execv mingw_execv + +static inline unsigned int git_ntohl(unsigned int x) +{ return (unsigned int)ntohl(x); } +#define ntohl git_ntohl + +sig_handler_t mingw_signal(int sig, sig_handler_t handler); +#define signal mingw_signal + +int mingw_raise(int sig); +#define raise mingw_raise + +/* + * ANSI emulation wrappers + */ + +void winansi_init(void); +HANDLE winansi_get_osfhandle(int fd); + +/* + * git specific compatibility + */ + +#define has_dos_drive_prefix(path) (isalpha(*(path)) && (path)[1] == ':') +#define is_dir_sep(c) ((c) == '/' || (c) == '\\') +static inline char *mingw_find_last_dir_sep(const char *path) +{ + char *ret = NULL; + for (; *path; ++path) + if (is_dir_sep(*path)) + ret = (char *)path; + return ret; +} +#define find_last_dir_sep mingw_find_last_dir_sep +int mingw_offset_1st_component(const char *path); +#define offset_1st_component mingw_offset_1st_component +#define PATH_SEP ';' +#define PRIuMAX "I64u" +#define PRId64 "I64d" + +void mingw_open_html(const char *path); +#define open_html mingw_open_html + +void mingw_mark_as_git_dir(const char *dir); +#define mark_as_git_dir mingw_mark_as_git_dir + +/** + * Converts UTF-8 encoded string to UTF-16LE. + * + * To support repositories with legacy-encoded file names, invalid UTF-8 bytes + * 0xa0 - 0xff are converted to corresponding printable Unicode chars \u00a0 - + * \u00ff, and invalid UTF-8 bytes 0x80 - 0x9f (which would make non-printable + * Unicode) are converted to hex-code. 
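+ * For example, the byte pair c3 a9 ("é") becomes the single wide char
+ * U+00E9, a lone a9 maps 1:1 to U+00A9, and a lone 80 expands to the
+ * two wide chars '8' and '0'.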
+ * + * Lead-bytes not followed by an appropriate number of trail-bytes, over-long + * encodings and 4-byte encodings > \u10ffff are detected as invalid UTF-8. + * + * Maximum space requirement for the target buffer is two wide chars per UTF-8 + * char (((strlen(utf) * 2) + 1) [* sizeof(wchar_t)]). + * + * The maximum space is needed only if the entire input string consists of + * invalid UTF-8 bytes in range 0x80-0x9f, as per the following table: + * + * | | UTF-8 | UTF-16 | + * Code point | UTF-8 sequence | bytes | words | ratio + * --------------+-------------------+-------+--------+------- + * 000000-00007f | 0-7f | 1 | 1 | 1 + * 000080-0007ff | c2-df + 80-bf | 2 | 1 | 0.5 + * 000800-00ffff | e0-ef + 2 * 80-bf | 3 | 1 | 0.33 + * 010000-10ffff | f0-f4 + 3 * 80-bf | 4 | 2 (a) | 0.5 + * invalid | 80-9f | 1 | 2 (b) | 2 + * invalid | a0-ff | 1 | 1 | 1 + * + * (a) encoded as UTF-16 surrogate pair + * (b) encoded as two hex digits + * + * Note that, while the UTF-8 encoding scheme can be extended to 5-byte, 6-byte + * or even indefinite-byte sequences, the largest valid code point \u10ffff + * encodes as only 4 UTF-8 bytes. + * + * Parameters: + * wcs: wide char target buffer + * utf: string to convert + * wcslen: size of target buffer (in wchar_t's) + * utflen: size of string to convert, or -1 if 0-terminated + * + * Returns: + * length of converted string (_wcslen(wcs)), or -1 on failure + * + * Errors: + * EINVAL: one of the input parameters is invalid (e.g. NULL) + * ERANGE: the output buffer is too small + */ +int xutftowcsn(wchar_t *wcs, const char *utf, size_t wcslen, int utflen); + +/** + * Simplified variant of xutftowcsn, assumes input string is \0-terminated. + */ +static inline int xutftowcs(wchar_t *wcs, const char *utf, size_t wcslen) +{ + return xutftowcsn(wcs, utf, wcslen, -1); +} + +/** + * Simplified file system specific variant of xutftowcsn, assumes output + * buffer size is MAX_PATH wide chars and input string is \0-terminated, + * fails with ENAMETOOLONG if input string is too long. + */ +static inline int xutftowcs_path(wchar_t *wcs, const char *utf) +{ + int result = xutftowcsn(wcs, utf, MAX_PATH, -1); + if (result < 0 && errno == ERANGE) + errno = ENAMETOOLONG; + return result; +} + +/** + * Converts UTF-16LE encoded string to UTF-8. + * + * Maximum space requirement for the target buffer is three UTF-8 chars per + * wide char ((_wcslen(wcs) * 3) + 1). + * + * The maximum space is needed only if the entire input string consists of + * UTF-16 words in range 0x0800-0xd7ff or 0xe000-0xffff (i.e. \u0800-\uffff + * modulo surrogate pairs), as per the following table: + * + * | | UTF-16 | UTF-8 | + * Code point | UTF-16 sequence | words | bytes | ratio + * --------------+-----------------------+--------+-------+------- + * 000000-00007f | 0000-007f | 1 | 1 | 1 + * 000080-0007ff | 0080-07ff | 1 | 2 | 2 + * 000800-00ffff | 0800-d7ff / e000-ffff | 1 | 3 | 3 + * 010000-10ffff | d800-dbff + dc00-dfff | 2 | 4 | 2 + * + * Note that invalid code points > 10ffff cannot be represented in UTF-16. + * + * Parameters: + * utf: target buffer + * wcs: wide string to convert + * utflen: size of target buffer + * + * Returns: + * length of converted string, or -1 on failure + * + * Errors: + * EINVAL: one of the input parameters is invalid (e.g. NULL) + * ERANGE: the output buffer is too small + */ +int xwcstoutf(char *utf, const wchar_t *wcs, size_t utflen); + +/* + * A critical section used in the implementation of the spawn + * functions (mingw_spawnv[p]e()) and waitpid(). 
Intialised in + * the replacement main() macro below. + */ +extern CRITICAL_SECTION pinfo_cs; + +/* + * A replacement of main() that adds win32 specific initialization. + */ + +void mingw_startup(); +#define main(c,v) dummy_decl_mingw_main(); \ +static int mingw_main(c,v); \ +int main(int argc, char **argv) \ +{ \ + mingw_startup(); \ + return mingw_main(__argc, (void *)__argv); \ +} \ +static int mingw_main(c,v) + +/* + * Used by Pthread API implementation for Windows + */ +extern int err_win_to_posix(DWORD winerr); diff --git a/compat/mkdir.c b/compat/mkdir.c new file mode 100644 index 0000000000..9e253fb72f --- /dev/null +++ b/compat/mkdir.c @@ -0,0 +1,24 @@ +#include "../git-compat-util.h" +#undef mkdir + +/* for platforms that can't deal with a trailing '/' */ +int compat_mkdir_wo_trailing_slash(const char *dir, mode_t mode) +{ + int retval; + char *tmp_dir = NULL; + size_t len = strlen(dir); + + if (len && dir[len-1] == '/') { + if ((tmp_dir = strdup(dir)) == NULL) + return -1; + tmp_dir[len-1] = '\0'; + } + else + tmp_dir = (char *)dir; + + retval = mkdir(tmp_dir, mode); + if (tmp_dir != dir) + free(tmp_dir); + + return retval; +} diff --git a/compat/mkdtemp.c b/compat/mkdtemp.c new file mode 100644 index 0000000000..1136119592 --- /dev/null +++ b/compat/mkdtemp.c @@ -0,0 +1,8 @@ +#include "../git-compat-util.h" + +char *gitmkdtemp(char *template) +{ + if (!*mktemp(template) || mkdir(template, 0700)) + return NULL; + return template; +} diff --git a/compat/mmap.c b/compat/mmap.c new file mode 100644 index 0000000000..7f662fef7b --- /dev/null +++ b/compat/mmap.c @@ -0,0 +1,40 @@ +#include "../git-compat-util.h" + +void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset) +{ + size_t n = 0; + + if (start != NULL || !(flags & MAP_PRIVATE)) + die("Invalid usage of mmap when built with NO_MMAP"); + + start = xmalloc(length); + if (start == NULL) { + errno = ENOMEM; + return MAP_FAILED; + } + + while (n < length) { + ssize_t count = xpread(fd, (char *)start + n, length - n, offset + n); + + if (count == 0) { + memset((char *)start+n, 0, length-n); + break; + } + + if (count < 0) { + free(start); + errno = EACCES; + return MAP_FAILED; + } + + n += count; + } + + return start; +} + +int git_munmap(void *start, size_t length) +{ + free(start); + return 0; +} diff --git a/compat/msvc.c b/compat/msvc.c new file mode 100644 index 0000000000..71843d7eef --- /dev/null +++ b/compat/msvc.c @@ -0,0 +1,6 @@ +#include "../git-compat-util.h" +#include "win32.h" +#include <conio.h> +#include "../strbuf.h" + +#include "mingw.c" diff --git a/compat/msvc.h b/compat/msvc.h new file mode 100644 index 0000000000..580bb55bf4 --- /dev/null +++ b/compat/msvc.h @@ -0,0 +1,29 @@ +#ifndef __MSVC__HEAD +#define __MSVC__HEAD + +#include <direct.h> +#include <process.h> +#include <malloc.h> +#include <io.h> + +/* porting function */ +#define inline __inline +#define __inline__ __inline +#define __attribute__(x) +#define strncasecmp _strnicmp +#define ftruncate _chsize +#define strtoull _strtoui64 +#define strtoll _strtoi64 + +static __inline int strcasecmp (const char *s1, const char *s2) +{ + int size1 = strlen(s1); + int sisz2 = strlen(s2); + return _strnicmp(s1, s2, sisz2 > size1 ? 
sisz2 : size1); +} + +#undef ERROR + +#include "compat/mingw.h" + +#endif diff --git a/compat/nedmalloc/License.txt b/compat/nedmalloc/License.txt new file mode 100644 index 0000000000..36b7cd93cd --- /dev/null +++ b/compat/nedmalloc/License.txt @@ -0,0 +1,23 @@ +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/compat/nedmalloc/Readme.txt b/compat/nedmalloc/Readme.txt new file mode 100644 index 0000000000..07cbf50c0f --- /dev/null +++ b/compat/nedmalloc/Readme.txt @@ -0,0 +1,136 @@ +nedalloc v1.05 15th June 2008: +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +by Niall Douglas (http://www.nedprod.com/programs/portable/nedmalloc/) + +Enclosed is nedalloc, an alternative malloc implementation for multiple +threads without lock contention based on dlmalloc v2.8.4. It is more +or less a newer implementation of ptmalloc2, the standard allocator in +Linux (which is based on dlmalloc v2.7.0) but also contains a per-thread +cache for maximum CPU scalability. + +It is licensed under the Boost Software License which basically means +you can do anything you like with it. This does not apply to the malloc.c.h +file which remains copyright to others. + +It has been tested on win32 (x86), win64 (x64), Linux (x64), FreeBSD (x64) +and Apple MacOS X (x86). It works very well on all of these and is very +significantly faster than the system allocator on all of these platforms. + +By literally dropping in this allocator as a replacement for your system +allocator, you can see real world improvements of up to three times in normal +code! + +To use: +-=-=-=- +Drop in nedmalloc.h, nedmalloc.c and malloc.c.h into your project. +Configure using the instructions in nedmalloc.h. Run and enjoy. + +To test, compile test.c. It will run a comparison between your system +allocator and nedalloc and tell you how much faster nedalloc is. It also +serves as an example of usage. + +Notes: +-=-=-= +If you want the very latest version of this allocator, get it from the +TnFOX SVN repository at svn://svn.berlios.de/viewcvs/tnfox/trunk/src/nedmalloc + +Because of how nedalloc allocates an mspace per thread, it can cause +severe bloating of memory usage under certain allocation patterns. 
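+
+As a rough illustration of the pool API involved (a sketch only, not a
+tuning recommendation; see nedmalloc.h for the authoritative
+declarations):
+
+    #include "nedmalloc.h"
+
+    nedpool *pool = nedcreatepool(0, 2); /* cap the pool at 2 threads */
+    void *p = nedpmalloc(pool, 4096);
+    nedpfree(pool, p);
+    /* on each thread's exit path, drop its cached free blocks: */
+    neddisablethreadcache(pool);
+    neddisablethreadcache(0);            /* pool zero, the system pool */
+
+Every thread that touches a pool pins an mspace plus a per-thread cache
+of freed blocks, and that retained memory is the bloat described above.
+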
+You can substantially reduce this wastage by setting MAXTHREADSINPOOL +or the threads parameter to nedcreatepool() to a fraction of the number of +threads which would normally be in a pool at once. This will reduce +bloating at the cost of an increase in lock contention. If allocated size +is less than THREADCACHEMAX, locking is avoided 90-99% of the time and +if most of your allocations are below this value, you can safely set +MAXTHREADSINPOOL to one. + +You will suffer memory leakage unless you call neddisablethreadcache() +per pool for every thread which exits. This is because nedalloc cannot +portably know when a thread exits and thus when its thread cache can +be returned for use by other code. Don't forget pool zero, the system pool. + +For C++ type allocation patterns (where the same sizes of memory are +regularly allocated and deallocated as objects are created and destroyed), +the threadcache always benefits performance. If however your allocation +patterns are different, searching the threadcache may significantly slow +down your code - as a rule of thumb, if cache utilisation is below 80% +(see the source for neddisablethreadcache() for how to enable debug +printing in release mode) then you should disable the thread cache for +that thread. You can compile out the threadcache code by setting +THREADCACHEMAX to zero. + +Speed comparisons: +-=-=-=-=-=-=-=-=-= +See Benchmarks.xls for details. + +The enclosed test.c can do two things: it can be a torture test or a speed +test. The speed test is designed to be a representative synthetic +memory allocator test. It works by randomly mixing allocations with frees +with half of the allocation sizes being a two power multiple less than +512 bytes (to mimic C++ stack instantiated objects) and the other half +being a simple random value less than 16Kb. + +The real world code results are from Tn's TestIO benchmark. This is a +heavily multithreaded and memory intensive benchmark with a lot of branching +and other stuff modern processors don't like so much. As you'll note, the +test doesn't show the benefits of the threadcache mostly due to the saturation +of the memory bus being the limiting factor. + +ChangeLog: +-=-=-=-=-= +v1.05 15th June 2008: + * { 1042 } Added error check for TLSSET() and TLSFREE() macros. Thanks to +Markus Elfring for reporting this. + * { 1043 } Fixed a segfault when freeing memory allocated using +nedindependent_comalloc(). Thanks to Pavel Vozenilek for reporting this. + +v1.04 14th July 2007: + * Fixed a bug with the new optimised implementation that failed to lock +on a realloc under certain conditions. + * Fixed lack of thread synchronisation in InitPool() causing pool corruption + * Fixed a memory leak of thread cache contents on disabling. Thanks to Earl +Chew for reporting this. + * Added a sanity check for freed blocks being valid. + * Reworked test.c into being a torture test. + * Fixed GCC assembler optimisation misspecification + +v1.04alpha_svn915 7th October 2006: + * Fixed failure to unlock thread cache list if allocating a new list failed. +Thanks to Dmitry Chichkov for reporting this. Further thanks to Aleksey Sanin. + * Fixed realloc(0, <size>) segfaulting. Thanks to Dmitry Chichkov for +reporting this. + * Made config defines #ifndef so they can be overridden by the build system. +Thanks to Aleksey Sanin for suggesting this. + * Fixed deadlock in nedprealloc() due to unnecessary locking of preferred +thread mspace when mspace_realloc() always uses the original block's mspace +anyway. 
Thanks to Aleksey Sanin for reporting this. + * Made some speed improvements by hacking mspace_malloc() to no longer lock +its mspace, thus allowing the recursive mutex implementation to be removed +with an associated speed increase. Thanks to Aleksey Sanin for suggesting this. + * Fixed a bug where allocating mspaces overran its max limit. Thanks to +Aleksey Sanin for reporting this. + +v1.03 10th July 2006: + * Fixed memory corruption bug in threadcache code which only appeared with >4 +threads and in heavy use of the threadcache. + +v1.02 15th May 2006: + * Integrated dlmalloc v2.8.4, fixing the win32 memory release problem and +improving performance still further. Speed is now up to twice the speed of v1.01 +(average is 67% faster). + * Fixed win32 critical section implementation. Thanks to Pavel Kuznetsov +for reporting this. + * Wasn't locking mspace if all mspaces were locked. Thanks to Pavel Kuznetsov +for reporting this. + * Added Apple Mac OS X support. + +v1.01 24th February 2006: + * Fixed multiprocessor scaling problems by removing sources of cache sloshing + * Earl Chew <earl_chew <at> agilent <dot> com> sent patches for the following: + 1. size2binidx() wasn't working for default code path (non x86) + 2. Fixed failure to release mspace lock under certain circumstances which + caused a deadlock + +v1.00 1st January 2006: + * First release diff --git a/compat/nedmalloc/malloc.c.h b/compat/nedmalloc/malloc.c.h new file mode 100644 index 0000000000..f216a2a7d3 --- /dev/null +++ b/compat/nedmalloc/malloc.c.h @@ -0,0 +1,5756 @@ +/* + This is a version (aka dlmalloc) of malloc/free/realloc written by + Doug Lea and released to the public domain, as explained at + http://creativecommons.org/licenses/publicdomain. Send questions, + comments, complaints, performance data, etc to dl@cs.oswego.edu + +* Version pre-2.8.4 Mon Nov 27 11:22:37 2006 (dl at gee) + + Note: There may be an updated version of this malloc obtainable at + ftp://gee.cs.oswego.edu/pub/misc/malloc.c + Check before installing! + +* Quickstart + + This library is all in one file to simplify the most common usage: + ftp it, compile it (-O3), and link it into another program. All of + the compile-time options default to reasonable values for use on + most platforms. You might later want to step through various + compile-time and dynamic tuning options. + + For convenience, an include file for code using this malloc is at: + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.4.h + You don't really need this .h file unless you call functions not + defined in your system include files. The .h file contains only the + excerpts from this file needed for using this malloc on ANSI C/C++ + systems, so long as you haven't changed compile-time options about + naming and tuning parameters. If you do, then you can create your + own malloc.h that does include all settings by cutting at the point + indicated below. Note that you may already by default be using a C + library containing a malloc that is based on some version of this + malloc (for example in linux). You might still want to use the one + in this file to customize settings or to avoid overheads associated + with library versions. + +* Vital statistics: + + Supported pointer/size_t representation: 4 or 8 bytes + size_t MUST be an unsigned type of the same width as + pointers. (If you are using an ancient system that declares + size_t as a signed type, or need it to be a different width + than pointers, you can use a previous release of this malloc + (e.g. 2.7.2) supporting these.) 
+ + Alignment: 8 bytes (default) + This suffices for nearly all current machines and C compilers. + However, you can define MALLOC_ALIGNMENT to be wider than this + if necessary (up to 128bytes), at the expense of using more space. + + Minimum overhead per allocated chunk: 4 or 8 bytes (if 4byte sizes) + 8 or 16 bytes (if 8byte sizes) + Each malloced chunk has a hidden word of overhead holding size + and status information, and additional cross-check word + if FOOTERS is defined. + + Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) + 8-byte ptrs: 32 bytes (including overhead) + + Even a request for zero bytes (i.e., malloc(0)) returns a + pointer to something of the minimum allocatable size. + The maximum overhead wastage (i.e., number of extra bytes + allocated than were requested in malloc) is less than or equal + to the minimum size, except for requests >= mmap_threshold that + are serviced via mmap(), where the worst case wastage is about + 32 bytes plus the remainder from a system page (the minimal + mmap unit); typically 4096 or 8192 bytes. + + Security: static-safe; optionally more or less + The "security" of malloc refers to the ability of malicious + code to accentuate the effects of errors (for example, freeing + space that is not currently malloc'ed or overwriting past the + ends of chunks) in code that calls malloc. This malloc + guarantees not to modify any memory locations below the base of + heap, i.e., static variables, even in the presence of usage + errors. The routines additionally detect most improper frees + and reallocs. All this holds as long as the static bookkeeping + for malloc itself is not corrupted by some other means. This + is only one aspect of security -- these checks do not, and + cannot, detect all possible programming errors. + + If FOOTERS is defined nonzero, then each allocated chunk + carries an additional check word to verify that it was malloced + from its space. These check words are the same within each + execution of a program using malloc, but differ across + executions, so externally crafted fake chunks cannot be + freed. This improves security by rejecting frees/reallocs that + could corrupt heap memory, in addition to the checks preventing + writes to statics that are always on. This may further improve + security at the expense of time and space overhead. (Note that + FOOTERS may also be worth using with MSPACES.) + + By default detected errors cause the program to abort (calling + "abort()"). You can override this to instead proceed past + errors by defining PROCEED_ON_ERROR. In this case, a bad free + has no effect, and a malloc that encounters a bad address + caused by user overwrites will ignore the bad address by + dropping pointers and indices to all known memory. This may + be appropriate for programs that should continue if at all + possible in the face of programming errors, although they may + run out of memory because dropped memory is never reclaimed. + + If you don't like either of these options, you can define + CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything + else. And if you are sure that your program using malloc has + no errors or vulnerabilities, you can define INSECURE to 1, + which might (or might not) provide a small performance improvement. + + Thread-safety: NOT thread-safe unless USE_LOCKS defined + When USE_LOCKS is defined, each public call to malloc, free, + etc is surrounded with either a pthread mutex or a win32 + spinlock (depending on WIN32). 
This is not especially fast, and + can be a major bottleneck. It is designed only to provide + minimal protection in concurrent environments, and to provide a + basis for extensions. If you are using malloc in a concurrent + program, consider instead using nedmalloc + (http://www.nedprod.com/programs/portable/nedmalloc/) or + ptmalloc (See http://www.malloc.de), which are derived + from versions of this malloc. + + System requirements: Any combination of MORECORE and/or MMAP/MUNMAP + This malloc can use unix sbrk or any emulation (invoked using + the CALL_MORECORE macro) and/or mmap/munmap or any emulation + (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system + memory. On most unix systems, it tends to work best if both + MORECORE and MMAP are enabled. On Win32, it uses emulations + based on VirtualAlloc. It also uses common C library functions + like memset. + + Compliance: I believe it is compliant with the Single Unix Specification + (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably + others as well. + +* Overview of algorithms + + This is not the fastest, most space-conserving, most portable, or + most tunable malloc ever written. However it is among the fastest + while also being among the most space-conserving, portable and + tunable. Consistent balance across these factors results in a good + general-purpose allocator for malloc-intensive programs. + + In most ways, this malloc is a best-fit allocator. Generally, it + chooses the best-fitting existing chunk for a request, with ties + broken in approximately least-recently-used order. (This strategy + normally maintains low fragmentation.) However, for requests less + than 256bytes, it deviates from best-fit when there is not an + exactly fitting available chunk by preferring to use space adjacent + to that used for the previous small request, as well as by breaking + ties in approximately most-recently-used order. (These enhance + locality of series of small allocations.) And for very large requests + (>= 256Kb by default), it relies on system memory mapping + facilities, if supported. (This helps avoid carrying around and + possibly fragmenting memory used only for large chunks.) + + All operations (except malloc_stats and mallinfo) have execution + times that are bounded by a constant factor of the number of bits in + a size_t, not counting any clearing in calloc or copying in realloc, + or actions surrounding MORECORE and MMAP that have times + proportional to the number of non-contiguous regions returned by + system allocation routines, which is often just 1. In real-time + applications, you can optionally suppress segment traversals using + NO_SEGMENT_TRAVERSAL, which assures bounded execution even when + system allocators return non-contiguous spaces, at the typical + expense of carrying around more memory and increased fragmentation. + + The implementation is not very modular and seriously overuses + macros. Perhaps someday all C compilers will do as good a job + inlining modular code as can now be done by brute-force expansion, + but now, enough of them seem not to. + + Some compilers issue a lot of warnings about code that is + dead/unreachable only on some platforms, and also about intentional + uses of negation on unsigned types. All known cases of each can be + ignored. 
+ + For a longer but out of date high-level description, see + http://gee.cs.oswego.edu/dl/html/malloc.html + +* MSPACES + If MSPACES is defined, then in addition to malloc, free, etc., + this file also defines mspace_malloc, mspace_free, etc. These + are versions of malloc routines that take an "mspace" argument + obtained using create_mspace, to control all internal bookkeeping. + If ONLY_MSPACES is defined, only these versions are compiled. + So if you would like to use this allocator for only some allocations, + and your system malloc for others, you can compile with + ONLY_MSPACES and then do something like... + static mspace mymspace = create_mspace(0,0); // for example + #define mymalloc(bytes) mspace_malloc(mymspace, bytes) + + (Note: If you only need one instance of an mspace, you can instead + use "USE_DL_PREFIX" to relabel the global malloc.) + + You can similarly create thread-local allocators by storing + mspaces as thread-locals. For example: + static __thread mspace tlms = 0; + void* tlmalloc(size_t bytes) { + if (tlms == 0) tlms = create_mspace(0, 0); + return mspace_malloc(tlms, bytes); + } + void tlfree(void* mem) { mspace_free(tlms, mem); } + + Unless FOOTERS is defined, each mspace is completely independent. + You cannot allocate from one and free to another (although + conformance is only weakly checked, so usage errors are not always + caught). If FOOTERS is defined, then each chunk carries around a tag + indicating its originating mspace, and frees are directed to their + originating spaces. + + ------------------------- Compile-time options --------------------------- + +Be careful in setting #define values for numerical constants of type +size_t. On some systems, literal values are not automatically extended +to size_t precision unless they are explicitly casted. You can also +use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below. + +WIN32 default: defined if _WIN32 defined + Defining WIN32 sets up defaults for MS environment and compilers. + Otherwise defaults are for unix. Beware that there seem to be some + cases where this malloc might not be a pure drop-in replacement for + Win32 malloc: Random-looking failures from Win32 GDI API's (eg; + SetDIBits()) may be due to bugs in some video driver implementations + when pixel buffers are malloc()ed, and the region spans more than + one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb) + default granularity, pixel buffers may straddle virtual allocation + regions more often than when using the Microsoft allocator. You can + avoid this by using VirtualAlloc() and VirtualFree() for all pixel + buffers rather than using malloc(). If this is not possible, + recompile this malloc with a larger DEFAULT_GRANULARITY. + +MALLOC_ALIGNMENT default: (size_t)8 + Controls the minimum alignment for malloc'ed chunks. It must be a + power of two and at least 8, even on machines for which smaller + alignments would suffice. It may be defined as larger than this + though. Note however that code and data structures are optimized for + the case of 8-byte alignment. + +MSPACES default: 0 (false) + If true, compile in support for independent allocation spaces. + This is only supported if HAVE_MMAP is true. + +ONLY_MSPACES default: 0 (false) + If true, only compile in mspace versions, not regular versions. + +USE_LOCKS default: 0 (false) + Causes each call to each public routine to be surrounded with + pthread or WIN32 mutex lock/unlock. (If set true, this can be + overridden on a per-mspace basis for mspace versions.) 
If set to a
+  non-zero value other than 1, locks are used, but their
+  implementation is left out, so lock functions must be supplied manually.
+
+USE_SPIN_LOCKS           default: 1 iff USE_LOCKS and on x86 using gcc or MSC
+  If true, uses custom spin locks for locking. This is currently
+  supported only for x86 platforms using gcc or recent MS compilers.
+  Otherwise, posix locks or win32 critical sections are used.
+
+FOOTERS                  default: 0
+  If true, provide extra checking and dispatching by placing
+  information in the footers of allocated chunks. This adds
+  space and time overhead.
+
+INSECURE                 default: 0
+  If true, omit checks for usage errors and heap space overwrites.
+
+USE_DL_PREFIX            default: NOT defined
+  Causes compiler to prefix all public routines with the string 'dl'.
+  This can be useful when you only want to use this malloc in one part
+  of a program, using your regular system malloc elsewhere. (See the
+  configuration sketch at the end of this options list.)
+
+ABORT                    default: defined as abort()
+  Defines how to abort on failed checks.  On most systems, a failed
+  check cannot die with an "assert" or even print an informative
+  message, because the underlying print routines in turn call malloc,
+  which will fail again.  Generally, the best policy is to simply call
+  abort(). It's not very useful to do more than this because many
+  errors due to overwriting will show up as address faults (null, odd
+  addresses etc) rather than malloc-triggered checks, so will also
+  abort.  Also, most compilers know that abort() does not return, so
+  can better optimize code conditionally calling it.
+
+PROCEED_ON_ERROR           default: defined as 0 (false)
+  Controls whether detected bad addresses are bypassed rather than
+  causing an abort. If set, detected bad arguments to free and
+  realloc are ignored. And all bookkeeping information is zeroed out
+  upon a detected overwrite of freed heap space, thus losing the
+  ability to ever return it from malloc again, but enabling the
+  application to proceed. If PROCEED_ON_ERROR is defined, the
+  static variable malloc_corruption_error_count is compiled in
+  and can be examined to see if errors have occurred. This option
+  generates slower code than the default abort policy.
+
+DEBUG                    default: NOT defined
+  The DEBUG setting is mainly intended for people trying to modify
+  this code or diagnose problems when porting to new platforms.
+  However, it may also be able to better isolate user errors than just
+  using runtime checks.  The assertions in the check routines spell
+  out in more detail the assumptions and invariants underlying the
+  algorithms.  The checking is fairly extensive, and will slow down
+  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
+  set will attempt to check every non-mmapped allocated and free chunk
+  in the course of computing the summaries.
+
+ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
+  Debugging assertion failures can be nearly impossible if your
+  version of the assert macro causes malloc to be called, which will
+  lead to a cascade of further failures, blowing the runtime stack.
+  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
+  which will usually make debugging easier.
+
+MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
+  The action to take before "return 0" when malloc fails because there
+  is no memory available.
+
+HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
+  True if this system supports sbrk or an emulation of it.
+
+MORECORE                  default: sbrk
+  The name of the sbrk-style system routine to call to obtain more
+  memory.  See below for guidance on writing custom MORECORE
+  functions. The type of the argument to sbrk/MORECORE varies across
+  systems.  It cannot be size_t, because it supports negative
+  arguments, so it is normally the signed type of the same width as
+  size_t (sometimes declared as "intptr_t").  It doesn't much matter
+  though. Internally, we only call it with arguments less than half
+  the max value of a size_t, which should work across all reasonable
+  possibilities, although sometimes generating compiler warnings.
+
+MORECORE_CONTIGUOUS       default: 1 (true) if HAVE_MORECORE
+  If true, take advantage of the fact that consecutive calls to MORECORE
+  with positive arguments always return contiguous increasing
+  addresses.  This is true of unix sbrk. It does not hurt too much to
+  set it true anyway, since malloc copes with non-contiguities.
+  Setting it false when definitely non-contiguous saves the time
+  and possibly wasted space it would take to discover this, though.
+
+MORECORE_CANNOT_TRIM      default: NOT defined
+  True if MORECORE cannot release space back to the system when given
+  negative arguments. This is generally necessary only if you are
+  using a hand-crafted MORECORE function that cannot handle negative
+  arguments.
+
+NO_SEGMENT_TRAVERSAL       default: 0
+  If non-zero, suppresses traversals of memory segments
+  returned by either MORECORE or CALL_MMAP. This disables
+  merging of segments that are contiguous, and selectively
+  releasing them to the OS if unused, but bounds execution times.
+
+HAVE_MMAP                 default: 1 (true)
+  True if this system supports mmap or an emulation of it.  If so, and
+  HAVE_MORECORE is not true, MMAP is used for all system
+  allocation.  If set and HAVE_MORECORE is true as well, MMAP is
+  primarily used to directly allocate very large blocks. It is also
+  used as a backup strategy in cases where MORECORE fails to provide
+  space from the system. Note: A single call to MUNMAP is assumed to be
+  able to unmap memory that may have been allocated using multiple calls
+  to MMAP, so long as they are adjacent.
+
+HAVE_MREMAP               default: 1 on linux, else 0
+  If true, realloc() uses mremap() to re-allocate large blocks and
+  extend or shrink allocation spaces.
+
+MMAP_CLEARS               default: 1 except on WINCE.
+  True if mmap clears memory so calloc doesn't need to. This is true
+  for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
+
+USE_BUILTIN_FFS            default: 0 (i.e., not used)
+  Causes malloc to use the builtin ffs() function to compute indices.
+  Some compilers may recognize and intrinsify ffs to be faster than the
+  supplied C version. Also, the case of x86 using gcc is special-cased
+  to an asm instruction, so is already as fast as it can be, and so
+  this setting has no effect. Similarly for Win32 under recent MS compilers.
+  (On most x86s, the asm version is only slightly faster than the C version.)
+
+malloc_getpagesize         default: derive from system includes, or 4096.
+  The system page size. To the extent possible, this malloc manages
+  memory from the system in page-size units.  This may be (and
+  usually is) a function rather than a constant. This is ignored
+  if WIN32, where page size is determined using getSystemInfo during
+  initialization.
+
+USE_DEV_RANDOM             default: 0 (i.e., not used)
+  Causes malloc to use /dev/random to initialize secure magic seed for
+  stamping footers. Otherwise, the current time is used.
+
+NO_MALLINFO                default: 0
+  If defined, don't compile "mallinfo".
This can be a simple way
+  of dealing with mismatches between system declarations and
+  those in this file.
+
+MALLINFO_FIELD_TYPE        default: size_t
+  The type of the fields in the mallinfo struct. This was originally
+  defined as "int" in SVID etc, but is more usefully defined as
+  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
+
+REALLOC_ZERO_BYTES_FREES    default: not defined
+  This should be set if a call to realloc with zero bytes should
+  be the same as a call to free. Some people think it should. Otherwise,
+  since this malloc returns a unique pointer for malloc(0),
+  realloc(p, 0) does too.
+
+LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
+LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
+LACKS_STDLIB_H                default: NOT defined unless on WIN32
+  Define these if your system does not have these header files.
+  You might need to manually insert some of the declarations they provide.
+
+DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
+                                system_info.dwAllocationGranularity in WIN32,
+                                otherwise 64K.
+      Also settable using mallopt(M_GRANULARITY, x)
+  The unit for allocating and deallocating memory from the system.  On
+  most systems with contiguous MORECORE, there is no reason to
+  make this more than a page. However, systems with MMAP tend to
+  either require or encourage larger granularities.  You can increase
+  this value to prevent system allocation functions from being called so
+  often, especially if they are slow.  The value must be at least one
+  page and must be a power of two.  Setting to 0 causes initialization
+  to either page size or win32 region size.  (Note: In previous
+  versions of malloc, the equivalent of this option was called
+  "TOP_PAD")
+
+DEFAULT_TRIM_THRESHOLD    default: 2MB
+      Also settable using mallopt(M_TRIM_THRESHOLD, x)
+  The maximum amount of unused top-most memory to keep before
+  releasing via malloc_trim in free().  Automatic trimming is mainly
+  useful in long-lived programs using contiguous MORECORE.  Because
+  trimming via sbrk can be slow on some systems, and can sometimes be
+  wasteful (in cases where programs immediately afterward allocate
+  more large chunks) the value should be high enough so that your
+  overall system performance would improve by releasing this much
+  memory.  As a rough guide, you might set to a value close to the
+  average size of a process (program) running on your system.
+  Releasing this much memory would allow such a process to run in
+  memory.  Generally, it is worth tuning trim thresholds when a
+  program undergoes phases where several large chunks are allocated
+  and released in ways that can reuse each other's storage, perhaps
+  mixed with phases where there are no such chunks at all.  The trim
+  value must be greater than page size to have any useful effect.  To
+  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
+  some people use of mallocing a huge space and then freeing it at
+  program startup, in an attempt to reserve system memory, doesn't
+  have the intended effect under automatic trimming, since that memory
+  will immediately be returned to the system.
+
+DEFAULT_MMAP_THRESHOLD       default: 256K
+      Also settable using mallopt(M_MMAP_THRESHOLD, x)
+  The request size threshold for using MMAP to directly service a
+  request. Requests of at least this size that cannot be allocated
+  using already-existing space will be serviced via mmap.  (If enough
+  normal freed space already exists it is used instead.)
Using mmap + segregates relatively large chunks of memory so that they can be + individually obtained and released from the host system. A request + serviced through mmap is never reused by any other request (at least + not directly; the system may just so happen to remap successive + requests to the same locations). Segregating space in this way has + the benefits that: Mmapped space can always be individually released + back to the system, which helps keep the system level memory demands + of a long-lived program low. Also, mapped memory doesn't become + `locked' between other chunks, as can happen with normally allocated + chunks, which means that even trimming via malloc_trim would not + release them. However, it has the disadvantage that the space + cannot be reclaimed, consolidated, and then used to service later + requests, as happens with normal chunks. The advantages of mmap + nearly always outweigh disadvantages for "large" chunks, but the + value of "large" may vary across systems. The default is an + empirically derived value that works well in most systems. You can + disable mmap by setting to MAX_SIZE_T. + +MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP + The number of consolidated frees between checks to release + unused segments when freeing. When using non-contiguous segments, + especially with multiple mspaces, checking only for topmost space + doesn't always suffice to trigger trimming. To compensate for this, + free() will, with a period of MAX_RELEASE_CHECK_RATE (or the + current number of segments, if greater) try to release unused + segments to the OS when freeing chunks that result in + consolidation. The best value for this parameter is a compromise + between slowing down frees with relatively costly checks that + rarely trigger versus holding on to unused memory. To effectively + disable, set to MAX_SIZE_T. This may lead to a very slight speed + improvement at the expense of carrying around more memory. 
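+
+  As a hedged illustration of combining these options (the file name
+  and chosen values here are assumptions, not part of this
+  distribution): a project wanting a private, prefixed copy of this
+  allocator with locking enabled might configure it from a small
+  wrapper translation unit before compiling the implementation:
+
+    // hypothetical wrapper.c
+    #define USE_DL_PREFIX             // expose dlmalloc/dlfree; keep system malloc
+    #define USE_LOCKS 1               // surround public routines with a mutex
+    #define DEFAULT_GRANULARITY (128U * 1024U)  // take system memory 128K at a time
+    #include "malloc.c"               // the allocator source; name is an assumption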
+*/ + +/* Version identifier to allow people to support multiple versions */ +#ifndef DLMALLOC_VERSION +#define DLMALLOC_VERSION 20804 +#endif /* DLMALLOC_VERSION */ + +#if defined(linux) +#define _GNU_SOURCE 1 +#endif + +#ifndef WIN32 +#ifdef _WIN32 +#define WIN32 1 +#endif /* _WIN32 */ +#ifdef _WIN32_WCE +#define LACKS_FCNTL_H +#define WIN32 1 +#endif /* _WIN32_WCE */ +#endif /* WIN32 */ +#ifdef WIN32 +#define WIN32_LEAN_AND_MEAN +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x403 +#endif +#include <windows.h> +#define HAVE_MMAP 1 +#define HAVE_MORECORE 0 +#define LACKS_UNISTD_H +#define LACKS_SYS_PARAM_H +#define LACKS_SYS_MMAN_H +#define LACKS_STRING_H +#define LACKS_STRINGS_H +#define LACKS_SYS_TYPES_H +#define LACKS_ERRNO_H +#ifndef MALLOC_FAILURE_ACTION +#define MALLOC_FAILURE_ACTION +#endif /* MALLOC_FAILURE_ACTION */ +#ifdef _WIN32_WCE /* WINCE reportedly does not clear */ +#define MMAP_CLEARS 0 +#else +#define MMAP_CLEARS 1 +#endif /* _WIN32_WCE */ +#endif /* WIN32 */ + +#if defined(DARWIN) || defined(_DARWIN) +/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ +#ifndef HAVE_MORECORE +#define HAVE_MORECORE 0 +#define HAVE_MMAP 1 +/* OSX allocators provide 16 byte alignment */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)16U) +#endif +#endif /* HAVE_MORECORE */ +#endif /* DARWIN */ + +#ifndef LACKS_SYS_TYPES_H +#include <sys/types.h> /* For size_t */ +#endif /* LACKS_SYS_TYPES_H */ + +/* The maximum possible size_t value has all bits set */ +#define MAX_SIZE_T (~(size_t)0) + +#ifndef ONLY_MSPACES +#define ONLY_MSPACES 0 /* define to a value */ +#else +#define ONLY_MSPACES 1 +#endif /* ONLY_MSPACES */ +#ifndef MSPACES +#if ONLY_MSPACES +#define MSPACES 1 +#else /* ONLY_MSPACES */ +#define MSPACES 0 +#endif /* ONLY_MSPACES */ +#endif /* MSPACES */ +#ifndef MALLOC_ALIGNMENT +#define MALLOC_ALIGNMENT ((size_t)8U) +#endif /* MALLOC_ALIGNMENT */ +#ifndef FOOTERS +#define FOOTERS 0 +#endif /* FOOTERS */ +#ifndef ABORT +#define ABORT abort() +#endif /* ABORT */ +#ifndef ABORT_ON_ASSERT_FAILURE +#define ABORT_ON_ASSERT_FAILURE 1 +#endif /* ABORT_ON_ASSERT_FAILURE */ +#ifndef PROCEED_ON_ERROR +#define PROCEED_ON_ERROR 0 +#endif /* PROCEED_ON_ERROR */ +#ifndef USE_LOCKS +#define USE_LOCKS 0 +#endif /* USE_LOCKS */ +#ifndef USE_SPIN_LOCKS +#if USE_LOCKS && (defined(__GNUC__) && ((defined(__i386__) || defined(__x86_64__)))) || (defined(_MSC_VER) && _MSC_VER>=1310) +#define USE_SPIN_LOCKS 1 +#else +#define USE_SPIN_LOCKS 0 +#endif /* USE_LOCKS && ... 
*/
+#endif /* USE_SPIN_LOCKS */
+#ifndef INSECURE
+#define INSECURE 0
+#endif /* INSECURE */
+#ifndef HAVE_MMAP
+#define HAVE_MMAP 1
+#endif /* HAVE_MMAP */
+#ifndef MMAP_CLEARS
+#define MMAP_CLEARS 1
+#endif /* MMAP_CLEARS */
+#ifndef HAVE_MREMAP
+#ifdef linux
+#define HAVE_MREMAP 1
+#else /* linux */
+#define HAVE_MREMAP 0
+#endif /* linux */
+#endif /* HAVE_MREMAP */
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
+#endif /* MALLOC_FAILURE_ACTION */
+#ifndef HAVE_MORECORE
+#if ONLY_MSPACES
+#define HAVE_MORECORE 0
+#else /* ONLY_MSPACES */
+#define HAVE_MORECORE 1
+#endif /* ONLY_MSPACES */
+#endif /* HAVE_MORECORE */
+#if !HAVE_MORECORE
+#define MORECORE_CONTIGUOUS 0
+#else /* !HAVE_MORECORE */
+#define MORECORE_DEFAULT sbrk
+#ifndef MORECORE_CONTIGUOUS
+#define MORECORE_CONTIGUOUS 1
+#endif /* MORECORE_CONTIGUOUS */
+#endif /* HAVE_MORECORE */
+#ifndef DEFAULT_GRANULARITY
+#if (MORECORE_CONTIGUOUS || defined(WIN32))
+#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
+#else /* MORECORE_CONTIGUOUS */
+#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+#endif /* MORECORE_CONTIGUOUS */
+#endif /* DEFAULT_GRANULARITY */
+#ifndef DEFAULT_TRIM_THRESHOLD
+#ifndef MORECORE_CANNOT_TRIM
+#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#else /* MORECORE_CANNOT_TRIM */
+#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
+#endif /* MORECORE_CANNOT_TRIM */
+#endif /* DEFAULT_TRIM_THRESHOLD */
+#ifndef DEFAULT_MMAP_THRESHOLD
+#if HAVE_MMAP
+#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
+#else /* HAVE_MMAP */
+#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+#endif /* HAVE_MMAP */
+#endif /* DEFAULT_MMAP_THRESHOLD */
+#ifndef MAX_RELEASE_CHECK_RATE
+#if HAVE_MMAP
+#define MAX_RELEASE_CHECK_RATE 4095
+#else
+#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
+#endif /* HAVE_MMAP */
+#endif /* MAX_RELEASE_CHECK_RATE */
+#ifndef USE_BUILTIN_FFS
+#define USE_BUILTIN_FFS 0
+#endif /* USE_BUILTIN_FFS */
+#ifndef USE_DEV_RANDOM
+#define USE_DEV_RANDOM 0
+#endif /* USE_DEV_RANDOM */
+#ifndef NO_MALLINFO
+#define NO_MALLINFO 0
+#endif /* NO_MALLINFO */
+#ifndef MALLINFO_FIELD_TYPE
+#define MALLINFO_FIELD_TYPE size_t
+#endif /* MALLINFO_FIELD_TYPE */
+#ifndef NO_SEGMENT_TRAVERSAL
+#define NO_SEGMENT_TRAVERSAL 0
+#endif /* NO_SEGMENT_TRAVERSAL */
+
+/*
+  mallopt tuning options.  SVID/XPG defines four standard parameter
+  numbers for mallopt, normally defined in malloc.h.  None of these
+  are used in this malloc, so setting them has no effect. But this
+  malloc does support the following options.
+*/
+
+#define M_TRIM_THRESHOLD     (-1)
+#define M_GRANULARITY        (-2)
+#define M_MMAP_THRESHOLD     (-3)
+
+/* ------------------------ Mallinfo declarations ------------------------ */
+
+#if !NO_MALLINFO
+/*
+  This version of malloc supports the standard SVID/XPG mallinfo
+  routine that returns a struct containing usage properties and
+  statistics. It should work on any system that has a
+  /usr/include/malloc.h defining struct mallinfo. The main
+  declaration needed is the mallinfo struct that is returned (by-copy)
+  by mallinfo().  The mallinfo struct contains a bunch of fields that
+  are not even meaningful in this version of malloc.  These fields
+  are instead filled by mallinfo() with other numbers that might be of
+  interest.
+
+  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+  /usr/include/malloc.h file that includes a declaration of struct
+  mallinfo.  If so, it is included; else a compliant version is
+  declared below.
These must be precisely the same for mallinfo() to + work. The original SVID version of this struct, defined on most + systems with mallinfo, declares all fields as ints. But some others + define as unsigned long. If your system defines the fields using a + type of different width than listed here, you MUST #include your + system version and #define HAVE_USR_INCLUDE_MALLOC_H. +*/ + +/* #define HAVE_USR_INCLUDE_MALLOC_H */ + +#ifdef HAVE_USR_INCLUDE_MALLOC_H +#include "/usr/include/malloc.h" +#else /* HAVE_USR_INCLUDE_MALLOC_H */ +#ifndef STRUCT_MALLINFO_DECLARED +#define STRUCT_MALLINFO_DECLARED 1 +struct mallinfo { + MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ + MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ + MALLINFO_FIELD_TYPE smblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblks; /* always 0 */ + MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ + MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ + MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ + MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ + MALLINFO_FIELD_TYPE fordblks; /* total free space */ + MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ +}; +#endif /* STRUCT_MALLINFO_DECLARED */ +#endif /* HAVE_USR_INCLUDE_MALLOC_H */ +#endif /* NO_MALLINFO */ + +/* + Try to persuade compilers to inline. The most critical functions for + inlining are defined as macros, so these aren't used for them. +*/ + +#ifndef FORCEINLINE + #if defined(__GNUC__) +#define FORCEINLINE __inline __attribute__ ((always_inline)) + #elif defined(_MSC_VER) + #define FORCEINLINE __forceinline + #endif +#endif +#ifndef NOINLINE + #if defined(__GNUC__) + #define NOINLINE __attribute__ ((noinline)) + #elif defined(_MSC_VER) + #define NOINLINE __declspec(noinline) + #else + #define NOINLINE + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#ifndef FORCEINLINE + #define FORCEINLINE inline +#endif +#endif /* __cplusplus */ +#ifndef FORCEINLINE + #define FORCEINLINE +#endif + +#if !ONLY_MSPACES + +/* ------------------- Declarations of public routines ------------------- */ + +#ifndef USE_DL_PREFIX +#define dlcalloc calloc +#define dlfree free +#define dlmalloc malloc +#define dlmemalign memalign +#define dlrealloc realloc +#define dlvalloc valloc +#define dlpvalloc pvalloc +#define dlmallinfo mallinfo +#define dlmallopt mallopt +#define dlmalloc_trim malloc_trim +#define dlmalloc_stats malloc_stats +#define dlmalloc_usable_size malloc_usable_size +#define dlmalloc_footprint malloc_footprint +#define dlmalloc_max_footprint malloc_max_footprint +#define dlindependent_calloc independent_calloc +#define dlindependent_comalloc independent_comalloc +#endif /* USE_DL_PREFIX */ + + +/* + malloc(size_t n) + Returns a pointer to a newly allocated chunk of at least n bytes, or + null if no space is available, in which case errno is set to ENOMEM + on ANSI C systems. + + If n is zero, malloc returns a minimum-sized chunk. (The minimum + size is 16 bytes on most 32bit systems, and 32 bytes on 64bit + systems.) Note that size_t is an unsigned type, so calls with + arguments that would be negative if signed are interpreted as + requests for huge amounts of space, which will often fail. The + maximum supported value of n differs across systems, but is in all + cases less than the maximum representable value of a size_t. 
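+
+  For example (a minimal sketch, assuming the dl-prefixed names are in
+  effect; the function name is illustrative only):
+
+    static int try_alloc(void) {
+      void* p = dlmalloc(1024);   // at least 1024 usable bytes
+      if (p == 0)
+        return -1;                // errno set to ENOMEM on ANSI C systems
+      dlfree(p);
+      return 0;
+    }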
+*/
+void* dlmalloc(size_t);
+
+/*
+  free(void* p)
+  Releases the chunk of memory pointed to by p, that had been previously
+  allocated using malloc or a related routine such as realloc.
+  It has no effect if p is null. If p was not malloced or already
+  freed, free(p) will by default cause the current program to abort.
+*/
+void  dlfree(void*);
+
+/*
+  calloc(size_t n_elements, size_t element_size);
+  Returns a pointer to n_elements * element_size bytes, with all locations
+  set to zero.
+*/
+void* dlcalloc(size_t, size_t);
+
+/*
+  realloc(void* p, size_t n)
+  Returns a pointer to a chunk of size n that contains the same data
+  as does chunk p up to the minimum of (n, p's size) bytes, or null
+  if no space is available.
+
+  The returned pointer may or may not be the same as p. The algorithm
+  prefers extending p in most cases when possible, otherwise it
+  employs the equivalent of a malloc-copy-free sequence.
+
+  If p is null, realloc is equivalent to malloc.
+
+  If space is not available, realloc returns null, errno is set (if on
+  ANSI) and p is NOT freed.
+
+  If n is for fewer bytes than already held by p, the newly unused
+  space is lopped off and freed if possible.  realloc with a size
+  argument of zero (re)allocates a minimum-sized chunk.
+
+  The old unix realloc convention of allowing the last-free'd chunk
+  to be used as an argument to realloc is not supported.
+*/
+
+void* dlrealloc(void*, size_t);
+
+/*
+  memalign(size_t alignment, size_t n);
+  Returns a pointer to a newly allocated chunk of n bytes, aligned
+  in accord with the alignment argument.
+
+  The alignment argument should be a power of two. If the argument is
+  not a power of two, the nearest greater power is used.
+  8-byte alignment is guaranteed by normal malloc calls, so don't
+  bother calling memalign with an argument of 8 or less.
+
+  Overreliance on memalign is a sure way to fragment space.
+*/
+void* dlmemalign(size_t, size_t);
+
+/*
+  valloc(size_t n);
+  Equivalent to memalign(pagesize, n), where pagesize is the page
+  size of the system. If the pagesize is unknown, 4096 is used.
+*/
+void* dlvalloc(size_t);
+
+/*
+  mallopt(int parameter_number, int parameter_value)
+  Sets tunable parameters. The format is to provide a
+  (parameter-number, parameter-value) pair.  mallopt then sets the
+  corresponding parameter to the argument value if it can (i.e., so
+  long as the value is meaningful), and returns 1 if successful else
+  0.  To work around the fact that mallopt is specified to use int,
+  not size_t parameters, the value -1 is specially treated as the
+  maximum unsigned size_t value.
+
+  SVID/XPG/ANSI defines four standard param numbers for mallopt,
+  normally defined in malloc.h.  None of these are used in this malloc,
+  so setting them has no effect. But this malloc also supports other
+  options in mallopt. See below for details.  Briefly, supported
+  parameters are as follows (listed defaults are for "typical"
+  configurations).
+
+  Symbol            param #  default    allowed param values
+  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1 disables)
+  M_GRANULARITY        -2     page size   any power of 2 >= page size
+  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
+*/
+int dlmallopt(int, int);
+
+/*
+  malloc_footprint();
+  Returns the number of bytes obtained from the system.  The total
+  number of bytes allocated by malloc, realloc etc., is less than this
+  value. Unlike mallinfo, this function returns only a precomputed
+  result, so can be called frequently to monitor memory consumption.
+ Even if locks are otherwise defined, this function does not use them, + so results might not be up to date. +*/ +size_t dlmalloc_footprint(void); + +/* + malloc_max_footprint(); + Returns the maximum number of bytes obtained from the system. This + value will be greater than current footprint if deallocated space + has been reclaimed by the system. The peak number of bytes allocated + by malloc, realloc etc., is less than this value. Unlike mallinfo, + this function returns only a precomputed result, so can be called + frequently to monitor memory consumption. Even if locks are + otherwise defined, this function does not use them, so results might + not be up to date. +*/ +size_t dlmalloc_max_footprint(void); + +#if !NO_MALLINFO +/* + mallinfo() + Returns (by copy) a struct containing various summary statistics: + + arena: current total non-mmapped bytes allocated from system + ordblks: the number of free chunks + smblks: always zero. + hblks: current number of mmapped regions + hblkhd: total bytes held in mmapped regions + usmblks: the maximum total allocated space. This will be greater + than current total if trimming has occurred. + fsmblks: always zero + uordblks: current total allocated space (normal or mmapped) + fordblks: total free space + keepcost: the maximum number of bytes that could ideally be released + back to system via malloc_trim. ("ideally" means that + it ignores page restrictions etc.) + + Because these fields are ints, but internal bookkeeping may + be kept as longs, the reported values may wrap around zero and + thus be inaccurate. +*/ +struct mallinfo dlmallinfo(void); +#endif /* NO_MALLINFO */ + +/* + independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); + + independent_calloc is similar to calloc, but instead of returning a + single cleared space, it returns an array of pointers to n_elements + independent elements that can hold contents of size elem_size, each + of which starts out cleared, and can be independently freed, + realloc'ed etc. The elements are guaranteed to be adjacently + allocated (this is not guaranteed to occur with multiple callocs or + mallocs), which may also improve cache locality in some + applications. + + The "chunks" argument is optional (i.e., may be null, which is + probably the most typical usage). If it is null, the returned array + is itself dynamically allocated and should also be freed when it is + no longer needed. Otherwise, the chunks array must be of at least + n_elements in length. It is filled in with the pointers to the + chunks. + + In either case, independent_calloc returns this pointer array, or + null if the allocation failed. If n_elements is zero and "chunks" + is null, it returns a chunk representing an array with zero elements + (which should be freed if not wanted). + + Each element must be individually freed when it is no longer + needed. If you'd like to instead be able to free all at once, you + should instead use regular calloc and assign pointers into this + space to represent elements. (In this case though, you cannot + independently free elements.) + + independent_calloc simplifies and speeds up implementations of many + kinds of pools. It may also be useful when constructing large data + structures that initially have a fixed number of fixed-sized nodes, + but the number is not known at compile time, and some of the nodes + may later need to be freed. 
For example:
+
+  struct Node { int item; struct Node* next; };
+
+  struct Node* build_list() {
+    struct Node** pool;
+    int n = read_number_of_nodes_needed();
+    int i;
+    if (n <= 0) return 0;
+    pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
+    if (pool == 0) die();
+    // organize into a linked list...
+    struct Node* first = pool[0];
+    for (i = 0; i < n-1; ++i)
+      pool[i]->next = pool[i+1];
+    free(pool);     // Can now free the array (or not, if it is needed later)
+    return first;
+  }
+*/
+void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+  independent_comalloc allocates, all at once, a set of n_elements
+  chunks with sizes indicated in the "sizes" array.  It returns
+  an array of pointers to these elements, each of which can be
+  independently freed, realloc'ed etc. The elements are guaranteed to
+  be adjacently allocated (this is not guaranteed to occur with
+  multiple callocs or mallocs), which may also improve cache locality
+  in some applications.
+
+  The "chunks" argument is optional (i.e., may be null). If it is null
+  the returned array is itself dynamically allocated and should also
+  be freed when it is no longer needed. Otherwise, the chunks array
+  must be of at least n_elements in length. It is filled in with the
+  pointers to the chunks.
+
+  In either case, independent_comalloc returns this pointer array, or
+  null if the allocation failed.  If n_elements is zero and chunks is
+  null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be individually freed when it is no longer
+  needed. If you'd like to instead be able to free all at once, you
+  should instead use a single regular malloc, and assign pointers at
+  particular offsets in the aggregate space. (In this case though, you
+  cannot independently free elements.)
+
+  independent_comalloc differs from independent_calloc in that each
+  element may have a different size, and also that it does not
+  automatically clear elements.
+
+  independent_comalloc can be used to speed up allocation in cases
+  where several structs or objects must always be allocated at the
+  same time.  For example:
+
+  struct Head { ... };
+  struct Foot { ... };
+
+  void send_message(char* msg) {
+    int msglen = strlen(msg);
+    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+    void* chunks[3];
+    if (independent_comalloc(3, sizes, chunks) == 0)
+      die();
+    struct Head* head = (struct Head*)(chunks[0]);
+    char*        body = (char*)(chunks[1]);
+    struct Foot* foot = (struct Foot*)(chunks[2]);
+    // ...
+  }
+
+  In general though, independent_comalloc is worth using only for
+  larger values of n_elements. For small values, you probably won't
+  detect enough difference from series of malloc calls to bother.
+
+  Overuse of independent_comalloc can increase overall memory usage,
+  since it cannot reuse existing noncontiguous small chunks that
+  might be available for some of the elements.
+*/
+void** dlindependent_comalloc(size_t, size_t*, void**);
+
+
+/*
+  pvalloc(size_t n);
+  Equivalent to valloc(minimum-page-that-holds(n)), that is,
+  round up n to nearest pagesize.
+ */
+void* dlpvalloc(size_t);
+
+/*
+  malloc_trim(size_t pad);
+
+  If possible, gives memory back to the system (via negative arguments
+  to sbrk) if there is unused memory at the `high' end of the malloc
+  pool or in unused MMAP segments.
You can call this after freeing + large blocks of memory to potentially reduce the system-level memory + requirements of a program. However, it cannot guarantee to reduce + memory. Under some allocation patterns, some large free blocks of + memory will be locked between two used chunks, so they cannot be + given back to the system. + + The `pad' argument to malloc_trim represents the amount of free + trailing space to leave untrimmed. If this argument is zero, only + the minimum amount of memory to maintain internal data structures + will be left. Non-zero arguments can be supplied to maintain enough + trailing space to service future expected allocations without having + to re-obtain memory from the system. + + Malloc_trim returns 1 if it actually released any memory, else 0. +*/ +int dlmalloc_trim(size_t); + +/* + malloc_stats(); + Prints on stderr the amount of space obtained from the system (both + via sbrk and mmap), the maximum amount (which may be more than + current if malloc_trim and/or munmap got called), and the current + number of bytes allocated via malloc (or realloc, etc) but not yet + freed. Note that this is the number of bytes allocated, not the + number requested. It will be larger than the number requested + because of alignment and bookkeeping overhead. Because it includes + alignment wastage as being in use, this figure may be greater than + zero even when no user-level chunks are allocated. + + The reported current and maximum system memory can be inaccurate if + a program makes other calls to system memory allocation functions + (normally sbrk) outside of malloc. + + malloc_stats prints only the most commonly interesting statistics. + More information can be obtained by calling mallinfo. +*/ +void dlmalloc_stats(void); + +#endif /* ONLY_MSPACES */ + +/* + malloc_usable_size(void* p); + + Returns the number of bytes you can actually use in + an allocated chunk, which may be more than you requested (although + often not) due to alignment and minimum size constraints. + You can use this many bytes without worrying about + overwriting other allocated objects. This is not a particularly great + programming practice. malloc_usable_size can be more useful in + debugging and assertions, for example: + + p = malloc(n); + assert(malloc_usable_size(p) >= 256); +*/ +size_t dlmalloc_usable_size(void*); + + +#if MSPACES + +/* + mspace is an opaque type representing an independent + region of space that supports mspace_malloc, etc. +*/ +typedef void* mspace; + +/* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). +*/ +mspace create_mspace(size_t capacity, int locked); + +/* + destroy_mspace destroys the given space, and attempts to return all + of its memory back to the system, returning the total number of + bytes freed. After destruction, the results of access to all memory + used by the space become undefined. +*/ +size_t destroy_mspace(mspace msp); + +/* + create_mspace_with_base uses the memory supplied as the initial base + of a new mspace. 
Part (less than 128*sizeof(size_t) bytes) of this
+  space is used for bookkeeping, so the capacity must be at least this
+  large. (Otherwise 0 is returned.) When this initial space is
+  exhausted, additional memory will be obtained from the system.
+  Destroying this space will deallocate all additionally allocated
+  space (if possible) but not the initial base.
+*/
+mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+
+/*
+  mspace_mmap_large_chunks controls whether requests for large chunks
+  are allocated in their own mmapped regions, separate from others in
+  this mspace. By default this is enabled, which reduces
+  fragmentation. However, such chunks are not necessarily released to
+  the system upon destroy_mspace.  Disabling by setting to false may
+  increase fragmentation, but avoids leakage when relying on
+  destroy_mspace to release all memory allocated using this space.
+*/
+int mspace_mmap_large_chunks(mspace msp, int enable);
+
+
+/*
+  mspace_malloc behaves as malloc, but operates within
+  the given space.
+*/
+void* mspace_malloc(mspace msp, size_t bytes);
+
+/*
+  mspace_free behaves as free, but operates within
+  the given space.
+
+  If compiled with FOOTERS==1, mspace_free is not actually needed.
+  free may be called instead of mspace_free because freed chunks from
+  any space are handled by their originating spaces.
+*/
+void mspace_free(mspace msp, void* mem);
+
+/*
+  mspace_realloc behaves as realloc, but operates within
+  the given space.
+
+  If compiled with FOOTERS==1, mspace_realloc is not actually
+  needed. realloc may be called instead of mspace_realloc because
+  realloced chunks from any space are handled by their originating
+  spaces.
+*/
+void* mspace_realloc(mspace msp, void* mem, size_t newsize);
+
+/*
+  mspace_calloc behaves as calloc, but operates within
+  the given space.
+*/
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+
+/*
+  mspace_memalign behaves as memalign, but operates within
+  the given space.
+*/
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+
+/*
+  mspace_independent_calloc behaves as independent_calloc, but
+  operates within the given space.
+*/
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+                                 size_t elem_size, void* chunks[]);
+
+/*
+  mspace_independent_comalloc behaves as independent_comalloc, but
+  operates within the given space.
+*/
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+                                   size_t sizes[], void* chunks[]);
+
+/*
+  mspace_footprint() returns the number of bytes obtained from the
+  system for this space.
+*/
+size_t mspace_footprint(mspace msp);
+
+/*
+  mspace_max_footprint() returns the peak number of bytes obtained from the
+  system for this space.
+*/
+size_t mspace_max_footprint(mspace msp);
+
+
+#if !NO_MALLINFO
+/*
+  mspace_mallinfo behaves as mallinfo, but reports properties of
+  the given space.
+*/
+struct mallinfo mspace_mallinfo(mspace msp);
+#endif /* NO_MALLINFO */
+
+/*
+  mspace_usable_size(void* p) behaves the same as malloc_usable_size.
+*/
+size_t mspace_usable_size(void* mem);
+
+/*
+  mspace_malloc_stats behaves as malloc_stats, but reports
+  properties of the given space.
+*/
+void mspace_malloc_stats(mspace msp);
+
+/*
+  mspace_trim behaves as malloc_trim, but
+  operates within the given space.
+*/
+int mspace_trim(mspace msp, size_t pad);
+
+/*
+  An alias for mallopt.
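+
+  A minimal sketch of the mspace lifecycle described above (all names
+  are local to this example):
+
+    mspace arena = create_mspace(0, 0);  // default capacity, no locking
+    if (arena != 0) {
+      void* a = mspace_malloc(arena, 128);
+      void* b = mspace_calloc(arena, 10, sizeof(int));
+      mspace_free(arena, a);             // optional; destroy releases everything
+      (void)b;
+      destroy_mspace(arena);             // return the space's memory to the system
+    }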
+*/ +int mspace_mallopt(int, int); + +#endif /* MSPACES */ + +#ifdef __cplusplus +}; /* end of extern "C" */ +#endif /* __cplusplus */ + +/* + ======================================================================== + To make a fully customizable malloc.h header file, cut everything + above this line, put into file malloc.h, edit to suit, and #include it + on the next line, as well as in programs that use this malloc. + ======================================================================== +*/ + +/* #include "malloc.h" */ + +/*------------------------------ internal #includes ---------------------- */ + +#ifdef WIN32 +#ifndef __GNUC__ +#pragma warning( disable : 4146 ) /* no "unsigned" warnings */ +#endif +#endif /* WIN32 */ + +#include <stdio.h> /* for printing in malloc_stats */ + +#ifndef LACKS_ERRNO_H +#include <errno.h> /* for MALLOC_FAILURE_ACTION */ +#endif /* LACKS_ERRNO_H */ +#if FOOTERS +#include <time.h> /* for magic initialization */ +#endif /* FOOTERS */ +#ifndef LACKS_STDLIB_H +#include <stdlib.h> /* for abort() */ +#endif /* LACKS_STDLIB_H */ +#ifdef DEBUG +#if ABORT_ON_ASSERT_FAILURE +#define assert(x) if(!(x)) ABORT +#else /* ABORT_ON_ASSERT_FAILURE */ +#include <assert.h> +#endif /* ABORT_ON_ASSERT_FAILURE */ +#else /* DEBUG */ +#ifndef assert +#define assert(x) +#endif +#define DEBUG 0 +#endif /* DEBUG */ +#ifndef LACKS_STRING_H +#include <string.h> /* for memset etc */ +#endif /* LACKS_STRING_H */ +#if USE_BUILTIN_FFS +#ifndef LACKS_STRINGS_H +#include <strings.h> /* for ffs */ +#endif /* LACKS_STRINGS_H */ +#endif /* USE_BUILTIN_FFS */ +#if HAVE_MMAP +#ifndef LACKS_SYS_MMAN_H +#include <sys/mman.h> /* for mmap */ +#endif /* LACKS_SYS_MMAN_H */ +#ifndef LACKS_FCNTL_H +#include <fcntl.h> +#endif /* LACKS_FCNTL_H */ +#endif /* HAVE_MMAP */ +#ifndef LACKS_UNISTD_H +#include <unistd.h> /* for sbrk, sysconf */ +#else /* LACKS_UNISTD_H */ +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) +extern void* sbrk(ptrdiff_t); +#endif /* FreeBSD etc */ +#endif /* LACKS_UNISTD_H */ + +/* Declarations for locking */ +#if USE_LOCKS +#ifndef WIN32 +#include <pthread.h> +#if defined (__SVR4) && defined (__sun) /* solaris */ +#include <thread.h> +#endif /* solaris */ +#else +#ifndef _M_AMD64 +/* These are already defined on AMD64 builds */ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +#ifndef __MINGW32__ +LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); +LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); +#endif +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _M_AMD64 */ +#ifndef __MINGW32__ +#pragma intrinsic (_InterlockedCompareExchange) +#pragma intrinsic (_InterlockedExchange) +#else + /* --[ start GCC compatibility ]---------------------------------------------- + * Compatibility <intrin_x86.h> header for GCC -- GCC equivalents of intrinsic + * Microsoft Visual C++ functions. Originally developed for the ReactOS + * (<http://www.reactos.org/>) and TinyKrnl (<http://www.tinykrnl.org/>) + * projects. 
+ * + * Copyright (c) 2006 KJK::Hyperion <hackbunny@reactos.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + + /*** Atomic operations ***/ + #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 + #define _ReadWriteBarrier() __sync_synchronize() + #else + static __inline__ __attribute__((always_inline)) long __sync_lock_test_and_set(volatile long * const Target, const long Value) + { + long res; + __asm__ __volatile__("xchg%z0 %2, %0" : "=g" (*(Target)), "=r" (res) : "1" (Value)); + return res; + } + static void __inline__ __attribute__((always_inline)) _MemoryBarrier(void) + { + __asm__ __volatile__("" : : : "memory"); + } + #define _ReadWriteBarrier() _MemoryBarrier() + #endif + /* BUGBUG: GCC only supports full barriers */ + static __inline__ __attribute__((always_inline)) long _InterlockedExchange(volatile long * const Target, const long Value) + { + /* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */ + _ReadWriteBarrier(); + return __sync_lock_test_and_set(Target, Value); + } + /* --[ end GCC compatibility ]---------------------------------------------- */ +#endif +#define interlockedcompareexchange _InterlockedCompareExchange +#define interlockedexchange _InterlockedExchange +#endif /* Win32 */ +#endif /* USE_LOCKS */ + +/* Declarations for bit scanning on win32 */ +#if defined(_MSC_VER) && _MSC_VER>=1300 +#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +unsigned char _BitScanForward(unsigned long *index, unsigned long mask); +unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#define BitScanForward _BitScanForward +#define BitScanReverse _BitScanReverse +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse) +#endif /* BitScanForward */ +#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ + +#ifndef WIN32 +#ifndef malloc_getpagesize +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ +# ifndef _SC_PAGE_SIZE +# define _SC_PAGE_SIZE _SC_PAGESIZE +# endif +# endif +# ifdef _SC_PAGE_SIZE +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE) +# else +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) + extern size_t getpagesize(); +# define malloc_getpagesize getpagesize() +# else +# ifdef WIN32 /* use supplied emulation of getpagesize */ +# define malloc_getpagesize getpagesize() +# else +# ifndef 
LACKS_SYS_PARAM_H +# include <sys/param.h> +# endif +# ifdef EXEC_PAGESIZE +# define malloc_getpagesize EXEC_PAGESIZE +# else +# ifdef NBPG +# ifndef CLSIZE +# define malloc_getpagesize NBPG +# else +# define malloc_getpagesize (NBPG * CLSIZE) +# endif +# else +# ifdef NBPC +# define malloc_getpagesize NBPC +# else +# ifdef PAGESIZE +# define malloc_getpagesize PAGESIZE +# else /* just guess */ +# define malloc_getpagesize ((size_t)4096U) +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif +#endif + + + +/* ------------------- size_t and alignment properties -------------------- */ + +/* The byte and bit size of a size_t */ +#define SIZE_T_SIZE (sizeof(size_t)) +#define SIZE_T_BITSIZE (sizeof(size_t) << 3) + +/* Some constants coerced to size_t */ +/* Annoying but necessary to avoid errors on some platforms */ +#define SIZE_T_ZERO ((size_t)0) +#define SIZE_T_ONE ((size_t)1) +#define SIZE_T_TWO ((size_t)2) +#define SIZE_T_FOUR ((size_t)4) +#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) +#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) +#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) +#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) + +/* The bit mask value corresponding to MALLOC_ALIGNMENT */ +#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) + +/* True if address a has acceptable alignment */ +#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) + +/* the number of bytes to offset an address to align it */ +#define align_offset(A)\ + ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ + ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) + +/* -------------------------- MMAP preliminaries ------------------------- */ + +/* + If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. +*/ + + +/* MORECORE and MMAP must return MFAIL on failure */ +#define MFAIL ((void*)(MAX_SIZE_T)) +#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ + +#if HAVE_MMAP + +#ifndef WIN32 +#define MUNMAP_DEFAULT(a, s) munmap((a), (s)) +#define MMAP_PROT (PROT_READ|PROT_WRITE) +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) +#define MAP_ANONYMOUS MAP_ANON +#endif /* MAP_ANON */ +#ifdef MAP_ANONYMOUS +#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) +#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) +#else /* MAP_ANONYMOUS */ +/* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. +*/ +#define MMAP_FLAGS (MAP_PRIVATE) +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ +#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ + (dev_zero_fd = open("/dev/zero", O_RDWR), \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ + mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) +#endif /* MAP_ANONYMOUS */ + +#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) + +#else /* WIN32 */ + +/* Win32 MMAP via VirtualAlloc */ +static FORCEINLINE void* win32mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + return (ptr != 0)? ptr: MFAIL; +} + +/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ +static FORCEINLINE void* win32direct_mmap(size_t size) { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, + PAGE_READWRITE); + return (ptr != 0)? 
ptr: MFAIL;
+}
+
+/* This function supports releasing coalesced segments */
+static FORCEINLINE int win32munmap(void* ptr, size_t size) {
+  MEMORY_BASIC_INFORMATION minfo;
+  char* cptr = (char*)ptr;
+  while (size) {
+    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+      return -1;
+    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+      return -1;
+    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+      return -1;
+    cptr += minfo.RegionSize;
+    size -= minfo.RegionSize;
+  }
+  return 0;
+}
+
+#define MMAP_DEFAULT(s)             win32mmap(s)
+#define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
+#define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MREMAP
+#ifndef WIN32
+#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#endif /* WIN32 */
+#endif /* HAVE_MREMAP */
+
+
+/**
+ * Define CALL_MORECORE
+ */
+#if HAVE_MORECORE
+    #ifdef MORECORE
+        #define CALL_MORECORE(S)    MORECORE(S)
+    #else  /* MORECORE */
+        #define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
+    #endif /* MORECORE */
+#else  /* HAVE_MORECORE */
+    #define CALL_MORECORE(S)        MFAIL
+#endif /* HAVE_MORECORE */
+
+/**
+ * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
+ */
+#if HAVE_MMAP
+    #define IS_MMAPPED_BIT          (SIZE_T_ONE)
+    #define USE_MMAP_BIT            (SIZE_T_ONE)
+
+    #ifdef MMAP
+        #define CALL_MMAP(s)        MMAP(s)
+    #else /* MMAP */
+        #define CALL_MMAP(s)        MMAP_DEFAULT(s)
+    #endif /* MMAP */
+    #ifdef MUNMAP
+        #define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
+    #else /* MUNMAP */
+        #define CALL_MUNMAP(a, s)   MUNMAP_DEFAULT((a), (s))
+    #endif /* MUNMAP */
+    #ifdef DIRECT_MMAP
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
+    #else /* DIRECT_MMAP */
+        #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
+    #endif /* DIRECT_MMAP */
+#else  /* HAVE_MMAP */
+    #define IS_MMAPPED_BIT          (SIZE_T_ZERO)
+    #define USE_MMAP_BIT            (SIZE_T_ZERO)
+
+    #define MMAP(s)                 MFAIL
+    #define MUNMAP(a, s)            (-1)
+    #define DIRECT_MMAP(s)          MFAIL
+    #define CALL_DIRECT_MMAP(s)     DIRECT_MMAP(s)
+    #define CALL_MMAP(s)            MMAP(s)
+    #define CALL_MUNMAP(a, s)       MUNMAP((a), (s))
+#endif /* HAVE_MMAP */
+
+/**
+ * Define CALL_MREMAP
+ */
+#if HAVE_MMAP && HAVE_MREMAP
+    #ifdef MREMAP
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
+    #else /* MREMAP */
+        #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
+    #endif /* MREMAP */
+#else  /* HAVE_MMAP && HAVE_MREMAP */
+    #define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT            (8U)
+
+
+/* --------------------------- Lock preliminaries ------------------------ */
+
+/*
+  When locks are defined, there is one global lock, plus
+  one per-mspace lock.
+
+  The global lock ensures that mparams.magic and other unique
+  mparams values are initialized only once. It also protects
+  sequences of calls to MORECORE.  In many cases sys_alloc requires
+  two calls that should not be interleaved with calls by other
+  threads.  This does not protect against direct calls to MORECORE
+  by other threads not using this lock, so there is still code to
+  cope as best we can with interference.
+
+  Per-mspace locks surround calls to malloc, free, etc. To enable use
+  in layered extensions, per-mspace locks are reentrant.
+ + Because lock-protected regions generally have bounded times, it is + OK to use the supplied simple spinlocks in the custom versions for + x86. + + If USE_LOCKS is > 1, the definitions of lock routines here are + bypassed, in which case you will need to define at least + INITIAL_LOCK, ACQUIRE_LOCK, RELEASE_LOCK and possibly TRY_LOCK + (which is not used in this malloc, but commonly needed in + extensions.) +*/ + +#if USE_LOCKS == 1 + +#if USE_SPIN_LOCKS +#ifndef WIN32 + +/* Custom pthread-style spin locks on x86 and x64 for gcc */ +struct pthread_mlock_t { + volatile unsigned int l; + volatile unsigned int c; + volatile pthread_t threadid; +}; +#define MLOCK_T struct pthread_mlock_t +#define CURRENT_THREAD pthread_self() +#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0) +#define ACQUIRE_LOCK(sl) pthread_acquire_lock(sl) +#define RELEASE_LOCK(sl) pthread_release_lock(sl) +#define TRY_LOCK(sl) pthread_try_lock(sl) +#define SPINS_PER_YIELD 63 + +static MLOCK_T malloc_global_mutex = { 0, 0, 0}; + +static FORCEINLINE int pthread_acquire_lock (MLOCK_T *sl) { + int spins = 0; + volatile unsigned int* lp = &sl->l; + for (;;) { + if (*lp != 0) { + if (sl->threadid == CURRENT_THREAD) { + ++sl->c; + return 0; + } + } + else { + /* place args to cmpxchgl in locals to evade oddities in some gccs */ + int cmp = 0; + int val = 1; + int ret; + __asm__ __volatile__ ("lock; cmpxchgl %1, %2" + : "=a" (ret) + : "r" (val), "m" (*(lp)), "0"(cmp) + : "memory", "cc"); + if (!ret) { + assert(!sl->threadid); + sl->c = 1; + sl->threadid = CURRENT_THREAD; + return 0; + } + if ((++spins & SPINS_PER_YIELD) == 0) { +#if defined (__SVR4) && defined (__sun) /* solaris */ + thr_yield(); +#else +#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) + sched_yield(); +#else /* no-op yield on unknown systems */ + ; +#endif /* __linux__ || __FreeBSD__ || __APPLE__ */ +#endif /* solaris */ + } + } + } +} + +static FORCEINLINE void pthread_release_lock (MLOCK_T *sl) { + assert(sl->l != 0); + assert(sl->threadid == CURRENT_THREAD); + if (--sl->c == 0) { + sl->threadid = 0; + volatile unsigned int* lp = &sl->l; + int prev = 0; + int ret; + __asm__ __volatile__ ("lock; xchgl %0, %1" + : "=r" (ret) + : "m" (*(lp)), "0"(prev) + : "memory"); + } +} + +static FORCEINLINE int pthread_try_lock (MLOCK_T *sl) { + volatile unsigned int* lp = &sl->l; + if (*lp != 0) { + if (sl->threadid == CURRENT_THREAD) { + ++sl->c; + return 1; + } + } + else { + int cmp = 0; + int val = 1; + int ret; + __asm__ __volatile__ ("lock; cmpxchgl %1, %2" + : "=a" (ret) + : "r" (val), "m" (*(lp)), "0"(cmp) + : "memory", "cc"); + if (!ret) { + assert(!sl->threadid); + sl->c = 1; + sl->threadid = CURRENT_THREAD; + return 1; + } + } + return 0; +} + + +#else /* WIN32 */ +/* Custom win32-style spin locks on x86 and x64 for MSC */ +struct win32_mlock_t +{ + volatile long l; + volatile unsigned int c; + volatile long threadid; +}; + +#define MLOCK_T struct win32_mlock_t +#define CURRENT_THREAD win32_getcurrentthreadid() +#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0) +#define ACQUIRE_LOCK(sl) win32_acquire_lock(sl) +#define RELEASE_LOCK(sl) win32_release_lock(sl) +#define TRY_LOCK(sl) win32_try_lock(sl) +#define SPINS_PER_YIELD 63 + +static MLOCK_T malloc_global_mutex = { 0, 0, 0}; + +static FORCEINLINE long win32_getcurrentthreadid(void) { +#ifdef _MSC_VER +#if defined(_M_IX86) + long *threadstruct=(long *)__readfsdword(0x18); + long threadid=threadstruct[0x24/sizeof(long)]; + return threadid; +#elif defined(_M_X64) + /* todo 
*/
+  return GetCurrentThreadId();
+#else
+  return GetCurrentThreadId();
+#endif
+#else
+  return GetCurrentThreadId();
+#endif
+}
+
+static FORCEINLINE int win32_acquire_lock (MLOCK_T *sl) {
+  int spins = 0;
+  for (;;) {
+    if (sl->l != 0) {
+      if (sl->threadid == CURRENT_THREAD) {
+        ++sl->c;
+        return 0;
+      }
+    }
+    else {
+      if (!interlockedexchange(&sl->l, 1)) {
+        assert(!sl->threadid);
+        sl->threadid = CURRENT_THREAD;
+        sl->c = 1;
+        return 0;
+      }
+    }
+    if ((++spins & SPINS_PER_YIELD) == 0)
+      SleepEx(0, FALSE);
+  }
+}
+
+static FORCEINLINE void win32_release_lock (MLOCK_T *sl) {
+  assert(sl->threadid == CURRENT_THREAD);
+  assert(sl->l != 0);
+  if (--sl->c == 0) {
+    sl->threadid = 0;
+    interlockedexchange (&sl->l, 0);
+  }
+}
+
+static FORCEINLINE int win32_try_lock (MLOCK_T *sl) {
+  if(sl->l != 0) {
+    if (sl->threadid == CURRENT_THREAD) {
+      ++sl->c;
+      return 1;
+    }
+  }
+  else {
+    if (!interlockedexchange(&sl->l, 1)){
+      assert(!sl->threadid);
+      sl->threadid = CURRENT_THREAD;
+      sl->c = 1;
+      return 1;
+    }
+  }
+  return 0;
+}
+
+#endif /* WIN32 */
+#else /* USE_SPIN_LOCKS */
+
+#ifndef WIN32
+/* pthreads-based locks */
+
+#define MLOCK_T               pthread_mutex_t
+#define CURRENT_THREAD        pthread_self()
+#define INITIAL_LOCK(sl)      pthread_init_lock(sl)
+#define ACQUIRE_LOCK(sl)      pthread_mutex_lock(sl)
+#define RELEASE_LOCK(sl)      pthread_mutex_unlock(sl)
+#define TRY_LOCK(sl)          (!pthread_mutex_trylock(sl))
+
+static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Cope with old-style linux recursive lock initialization by adding */
+/* skipped internal declaration from pthread.h */
+#ifdef linux
+#ifndef PTHREAD_MUTEX_RECURSIVE
+extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
+                                              int __kind));
+#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
+#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
+#endif
+#endif
+
+static int pthread_init_lock (MLOCK_T *sl) {
+  pthread_mutexattr_t attr;
+  if (pthread_mutexattr_init(&attr)) return 1;
+  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
+  if (pthread_mutex_init(sl, &attr)) return 1;
+  if (pthread_mutexattr_destroy(&attr)) return 1;
+  return 0;
+}
+
+#else /* WIN32 */
+/* Win32 critical sections */
+#define MLOCK_T               CRITICAL_SECTION
+#define CURRENT_THREAD        GetCurrentThreadId()
+#define INITIAL_LOCK(s)       (!InitializeCriticalSectionAndSpinCount((s), 0x80000000|4000))
+#define ACQUIRE_LOCK(s)       (EnterCriticalSection(s), 0)
+#define RELEASE_LOCK(s)       LeaveCriticalSection(s)
+#define TRY_LOCK(s)           TryEnterCriticalSection(s)
+#define NEED_GLOBAL_LOCK_INIT
+
+static MLOCK_T malloc_global_mutex;
+static volatile long malloc_global_mutex_status;
+
+/* Use spin loop to initialize global lock */
+static void init_malloc_global_mutex() {
+  for (;;) {
+    long stat = malloc_global_mutex_status;
+    if (stat > 0)
+      return;
+    /* transition to < 0 while initializing, then to > 0 */
+    if (stat == 0 &&
+        interlockedcompareexchange(&malloc_global_mutex_status, -1, 0) == 0) {
+      InitializeCriticalSection(&malloc_global_mutex);
+      interlockedexchange(&malloc_global_mutex_status,1);
+      return;
+    }
+    SleepEx(0, FALSE);
+  }
+}
+
+#endif /* WIN32 */
+#endif /* USE_SPIN_LOCKS */
+#endif /* USE_LOCKS == 1 */
+
+/* ----------------------- User-defined locks ------------------------ */
+
+#if USE_LOCKS > 1
+/* Define your own lock implementation here */
+/* #define INITIAL_LOCK(sl)  ... */
+/* #define ACQUIRE_LOCK(sl)  ... */
+/* #define RELEASE_LOCK(sl)  ... */
+/* #define TRY_LOCK(sl) ...
*/ +/* static MLOCK_T malloc_global_mutex = ... */ +#endif /* USE_LOCKS > 1 */ + +/* ----------------------- Lock-based state ------------------------ */ + +#if USE_LOCKS +#define USE_LOCK_BIT (2U) +#else /* USE_LOCKS */ +#define USE_LOCK_BIT (0U) +#define INITIAL_LOCK(l) +#endif /* USE_LOCKS */ + +#if USE_LOCKS +#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); +#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); +#else /* USE_LOCKS */ +#define ACQUIRE_MALLOC_GLOBAL_LOCK() +#define RELEASE_MALLOC_GLOBAL_LOCK() +#endif /* USE_LOCKS */ + + +/* ----------------------- Chunk representations ------------------------ */ + +/* + (The following includes lightly edited explanations by Colin Plumb.) + + The malloc_chunk declaration below is misleading (but accurate and + necessary). It declares a "view" into memory allowing access to + necessary fields at known offsets from a given base. + + Chunks of memory are maintained using a `boundary tag' method as + originally described by Knuth. (See the paper by Paul Wilson + ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such + techniques.) Sizes of free chunks are stored both in the front of + each chunk and at the end. This makes consolidating fragmented + chunks into bigger chunks fast. The head fields also hold bits + representing whether chunks are free or in use. + + Here are some pictures to make it clearer. They are "exploded" to + show that the state of a chunk can be thought of as extending from + the high 31 bits of the head field of its header through the + prev_foot and PINUSE_BIT bit of the following chunk header. + + A chunk that's in use looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk (if P = 0) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 1| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + +- -+ + | | + +- -+ + | : + +- size - sizeof(size_t) available payload bytes -+ + : | + chunk-> +- -+ + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| + | Size of next chunk (may or may not be in use) | +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + And if it's free, it looks like this: + + chunk-> +- -+ + | User payload (must be in use, or we would have merged!) 
| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| + | Size of this chunk 0| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Prev pointer | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- size - sizeof(struct chunk) unused bytes -+ + : | + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| + | Size of next chunk (must be in use, or we would have merged)| +-+ + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | : + +- User payload -+ + : | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |0| + +-+ + Note that since we always merge adjacent free chunks, the chunks + adjacent to a free chunk must be in use. + + Given a pointer to a chunk (which can be derived trivially from the + payload pointer) we can, in O(1) time, find out whether the adjacent + chunks are free, and if so, unlink them from the lists that they + are on and merge them with the current chunk. + + Chunks always begin on even word boundaries, so the mem portion + (which is returned to the user) is also on an even word boundary, and + thus at least double-word aligned. + + The P (PINUSE_BIT) bit, stored in the unused low-order bit of the + chunk size (which is always a multiple of two words), is an in-use + bit for the *previous* chunk. If that bit is *clear*, then the + word before the current chunk size contains the previous chunk + size, and can be used to find the front of the previous chunk. + The very first chunk allocated always has this bit set, preventing + access to non-existent (or non-owned) memory. If pinuse is set for + any given chunk, then you CANNOT determine the size of the + previous chunk, and might even get a memory addressing fault when + trying to do so. + + The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of + the chunk size redundantly records whether the current chunk is + inuse. This redundancy enables usage checks within free and realloc, + and reduces indirection when freeing and consolidating chunks. + + Each freshly allocated chunk must have both cinuse and pinuse set. + That is, each allocated chunk borders either a previously allocated + and still in-use chunk, or the base of its memory arena. This is + ensured by making all allocations from the `lowest' part of any + found chunk. Further, no free chunk physically borders another one, + so each free chunk is known to be preceded and followed by either + inuse chunks or the ends of memory. + + Note that the `foot' of the current chunk is actually represented + as the prev_foot of the NEXT chunk. This makes it easier to + deal with alignments etc but can be very confusing when trying + to extend or adapt this code. + + The exceptions to all this are + + 1. The special chunk `top' is the top-most available chunk (i.e., + the one bordering the end of available memory). It is treated + specially. Top is never included in any bin, is used only if + no other chunk is available, and is released back to the + system if it is very large (see M_TRIM_THRESHOLD). In effect, + the top chunk is treated as larger (and thus less well + fitting) than any other available chunk. 
The top chunk
+        doesn't update its trailing size field since there is no next
+        contiguous chunk that would have to index off it. However,
+        space is still allocated for it (TOP_FOOT_SIZE) to enable
+        separation or merging when space is extended.
+
+     2. Chunks allocated via mmap, which have the lowest-order bit
+        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
+        PINUSE_BIT in their head fields. Because they are allocated
+        one-by-one, each must carry its own prev_foot field, which is
+        also used to hold the offset this chunk has within its mmapped
+        region, which is needed to preserve alignment. Each mmapped
+        chunk is trailed by the first two fields of a fake next-chunk
+        for sake of usage checks.
+
+*/
+
+struct malloc_chunk {
+  size_t               prev_foot;  /* Size of previous chunk (if free).  */
+  size_t               head;       /* Size and inuse bits. */
+  struct malloc_chunk* fd;         /* double links -- used only if free. */
+  struct malloc_chunk* bk;
+};
+
+typedef struct malloc_chunk  mchunk;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
+typedef unsigned int bindex_t;         /* Described below */
+typedef unsigned int binmap_t;         /* Described below */
+typedef unsigned int flag_t;           /* The type of various bit flag sets */
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#define MCHUNK_SIZE         (sizeof(mchunk))
+
+#if FOOTERS
+#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
+#else /* FOOTERS */
+#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
+#endif /* FOOTERS */
+
+/* MMapped chunks need a second word of overhead ... */
+#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE\
+  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define pad_request(req) \
+   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define request2size(req) \
+  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
+
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+/*
+  The head field of a chunk is or'ed with PINUSE_BIT when the previous
+  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
+  in use. If the chunk was obtained with mmap, the prev_foot field has
+  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
+  mmapped region to the base of the chunk.
+
+  FLAG4_BIT is not used by this malloc, but might be useful in extensions. 
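+
+  For example (an illustrative size, assuming a 32-bit size_t and the
+  P and C bits described above): a 48-byte in-use chunk whose
+  predecessor is also in use carries head = 48 | PINUSE_BIT |
+  CINUSE_BIT = 0x33; chunksize() below strips the flag bits off
+  again, and next_chunk() steps 48 bytes forward.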
+*/ + +#define PINUSE_BIT (SIZE_T_ONE) +#define CINUSE_BIT (SIZE_T_TWO) +#define FLAG4_BIT (SIZE_T_FOUR) +#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) +#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) + +/* Head value for fenceposts */ +#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) + +/* extraction of fields from head words */ +#define cinuse(p) ((p)->head & CINUSE_BIT) +#define pinuse(p) ((p)->head & PINUSE_BIT) +#define chunksize(p) ((p)->head & ~(FLAG_BITS)) + +#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) +#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT) + +/* Treat space at ptr +/- offset as a chunk */ +#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) +#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) + +/* Ptr to next or previous physical malloc_chunk. */ +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) + +/* extract next chunk's pinuse bit */ +#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) + +/* Get/set size at footer */ +#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) + +/* Set size, pinuse bit, and foot */ +#define set_size_and_pinuse_of_free_chunk(p, s)\ + ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) + +/* Set size, pinuse bit, foot, and clear next pinuse */ +#define set_free_with_pinuse(p, s, n)\ + (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) + +#define is_mmapped(p)\ + (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT)) + +/* Get the internal overhead associated with chunk p */ +#define overhead_for(p)\ + (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) + +/* Return true if malloced space is not necessarily cleared */ +#if MMAP_CLEARS +#define calloc_must_clear(p) (!is_mmapped(p)) +#else /* MMAP_CLEARS */ +#define calloc_must_clear(p) (1) +#endif /* MMAP_CLEARS */ + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. 
Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size as an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. 
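+
+  As a concrete instance of the sizing rule: a free chunk of 0x180
+  bytes lands in the upper half of the smallest range and so keys to
+  treebin 1, while 0x100 keys to treebin 0 and 0x300 to treebin 3
+  (each doubling in size moves the chunk two bins up).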
+*/ + +struct malloc_tree_chunk { + /* The first four fields must be compatible with malloc_chunk */ + size_t prev_foot; + size_t head; + struct malloc_tree_chunk* fd; + struct malloc_tree_chunk* bk; + + struct malloc_tree_chunk* child[2]; + struct malloc_tree_chunk* parent; + bindex_t index; +}; + +typedef struct malloc_tree_chunk tchunk; +typedef struct malloc_tree_chunk* tchunkptr; +typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ + +/* A little helper macro for trees */ +#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If IS_MMAPPED_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. 
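+
+  A traversal sketch (this simply mirrors segment_holding(), defined
+  further below, for a hypothetical mstate m and address addr, using
+  the fields declared next):
+
+    msegmentptr sp = &m->seg;
+    while (sp != 0 && !(addr >= sp->base && addr < sp->base + sp->size))
+      sp = sp->next;
+    -- on exit, sp is the segment holding addr, or 0 if none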
+*/
+
+struct malloc_segment {
+  char*        base;             /* base address */
+  size_t       size;             /* allocated size */
+  struct malloc_segment* next;   /* ptr to next segment */
+  flag_t       sflags;           /* mmap and extern flag */
+};
+
+#define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
+#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
+
+typedef struct malloc_segment  msegment;
+typedef struct malloc_segment* msegmentptr;
+
+/* ---------------------------- malloc_state ----------------------------- */
+
+/*
+   A malloc_state holds all of the bookkeeping for a space.
+   The main fields are:
+
+  Top
+    The topmost chunk of the currently active segment. Its size is
+    cached in topsize.  The actual size of topmost space is
+    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
+    fenceposts and segment records if necessary when getting more
+    space from the system.  The size at which to autotrim top is
+    cached from mparams in trim_check, except that it is disabled if
+    an autotrim fails.
+
+  Designated victim (dv)
+    This is the preferred chunk for servicing small requests that
+    don't have exact fits.  It is normally the chunk split off most
+    recently to service another small request.  Its size is cached in
+    dvsize. The link fields of this chunk are not maintained since it
+    is not kept in a bin.
+
+  SmallBins
+    An array of bin headers for free chunks.  These bins hold chunks
+    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
+    chunks of all the same size, spaced 8 bytes apart.  To simplify
+    use in double-linked lists, each bin header acts as a malloc_chunk
+    pointing to the real first node, if it exists (else pointing to
+    itself).  This avoids special-casing for headers.  But to avoid
+    waste, we allocate only the fd/bk pointers of bins, and then use
+    repositioning tricks to treat these as the fields of a chunk.
+
+  TreeBins
+    Treebins are pointers to the roots of trees holding a range of
+    sizes. There are 2 equally spaced treebins for each power of two
+    from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything
+    larger.
+
+  Bin maps
+    There is one bit map for small bins ("smallmap") and one for
+    treebins ("treemap").  Each bin sets its bit when non-empty, and
+    clears the bit when empty.  Bit operations are then used to avoid
+    bin-by-bin searching -- nearly all "search" is done without ever
+    looking at bins that won't be selected.  The bit maps
+    conservatively use 32 bits per map word, even on a 64-bit system.
+    For a good description of some of the bit-based techniques used
+    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
+    supplement at http://hackersdelight.org/). Many of these are
+    intended to reduce the branchiness of paths through malloc etc, as
+    well as to reduce the number of memory locations read or written.
+
+  Segments
+    A list of segments headed by an embedded malloc_segment record
+    representing the initial space.
+
+  Address check support
+    The least_addr field is the least address ever obtained from
+    MORECORE or MMAP. Attempted frees and reallocs of any address less
+    than this are trapped (unless INSECURE is defined).
+
+  Magic tag
+    A cross-check field that should always hold the same value as
+    mparams.magic.
+
+  Flags
+    Bits recording whether to use MMAP, locks, or contiguous MORECORE
+
+  Statistics
+    Each space keeps track of current and maximum system memory
+    obtained via MORECORE or MMAP. 
+
+  Trim support
+    Fields holding the amount of unused topmost memory that should trigger
+    trimming, and a counter to force periodic scanning to release unused
+    non-topmost segments.
+
+  Locking
+    If USE_LOCKS is defined, the "mutex" lock is acquired and released
+    around every public call using this mspace.
+
+  Extension support
+    A void* pointer and a size_t field that can be used to help implement
+    extensions to this malloc.
+*/
+
+/* Bin types, widths and sizes */
+#define NSMALLBINS        (32U)
+#define NTREEBINS         (32U)
+#define SMALLBIN_SHIFT    (3U)
+#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT     (8U)
+#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
+
+struct malloc_state {
+  binmap_t   smallmap;
+  binmap_t   treemap;
+  size_t     dvsize;
+  size_t     topsize;
+  char*      least_addr;
+  mchunkptr  dv;
+  mchunkptr  top;
+  size_t     trim_check;
+  size_t     release_checks;
+  size_t     magic;
+  mchunkptr  smallbins[(NSMALLBINS+1)*2];
+  tbinptr    treebins[NTREEBINS];
+  size_t     footprint;
+  size_t     max_footprint;
+  flag_t     mflags;
+#if USE_LOCKS
+  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
+#endif /* USE_LOCKS */
+  msegment   seg;
+  void*      extp;      /* Unused but available for extensions */
+  size_t     exts;
+};
+
+typedef struct malloc_state*    mstate;
+
+/* ------------- Global malloc_state and malloc_params ------------------- */
+
+/*
+  malloc_params holds global properties, including those that can be
+  dynamically set using mallopt. There is a single instance, mparams,
+  initialized in init_mparams. Note that the non-zeroness of "magic"
+  also serves as an initialization flag.
+*/
+
+struct malloc_params {
+  volatile size_t magic;
+  size_t page_size;
+  size_t granularity;
+  size_t mmap_threshold;
+  size_t trim_threshold;
+  flag_t default_mflags;
+};
+
+static struct malloc_params mparams;
+
+/* Ensure mparams initialized */
+#define ensure_initialization() ((void)(mparams.magic != 0 || init_mparams()))
+
+#if !ONLY_MSPACES
+
+/* The global malloc_state used for all non-"mspace" calls */
+static struct malloc_state _gm_;
+#define gm                 (&_gm_)
+#define is_global(M)       ((M) == &_gm_)
+
+#endif /* !ONLY_MSPACES */
+
+#define is_initialized(M)  ((M)->top != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* Operations on mflags */
+
+#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
+#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
+#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
+
+#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
+#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
+#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
+
+#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
+#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
+
+#define set_lock(M,L)\
+  ((M)->mflags = (L)?\
+   ((M)->mflags | USE_LOCK_BIT) :\
+   ((M)->mflags & ~USE_LOCK_BIT))
+
+/* page-align a size */
+#define page_align(S)\
+  (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
+
+/* granularity-align a size */
+#define granularity_align(S)\
+  (((S) + (mparams.granularity - SIZE_T_ONE))\
+   & ~(mparams.granularity - SIZE_T_ONE))
+
+
+/* For mmap, use granularity alignment on windows, else page-align */
+#ifdef WIN32
+#define mmap_align(S) granularity_align(S)
+#else
+#define mmap_align(S) page_align(S)
+#endif
+
+/* For sys_alloc, enough padding to ensure we can malloc a 
request on success */ +#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) + +#define is_page_aligned(S)\ + (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) +#define is_granularity_aligned(S)\ + (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) + +/* True if segment S holds address A */ +#define segment_holds(S, A)\ + ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) + +/* Return segment holding given address */ +static msegmentptr segment_holding(mstate m, char* addr) { + msegmentptr sp = &m->seg; + for (;;) { + if (addr >= sp->base && addr < sp->base + sp->size) + return sp; + if ((sp = sp->next) == 0) + return 0; + } +} + +/* Return true if segment contains a segment link */ +static int has_segment_link(mstate m, msegmentptr ss) { + msegmentptr sp = &m->seg; + for (;;) { + if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) + return 1; + if ((sp = sp->next) == 0) + return 0; + } +} + +#ifndef MORECORE_CANNOT_TRIM +#define should_trim(M,s) ((s) > (M)->trim_check) +#else /* MORECORE_CANNOT_TRIM */ +#define should_trim(M,s) (0) +#endif /* MORECORE_CANNOT_TRIM */ + +/* + TOP_FOOT_SIZE is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +#define TOP_FOOT_SIZE\ + (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) + + +/* ------------------------------- Hooks -------------------------------- */ + +/* + PREACTION should be defined to return 0 on success, and nonzero on + failure. If you are not using locking, you can redefine these to do + anything you like. +*/ + +#if USE_LOCKS + +#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) +#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } +#else /* USE_LOCKS */ + +#ifndef PREACTION +#define PREACTION(M) (0) +#endif /* PREACTION */ + +#ifndef POSTACTION +#define POSTACTION(M) +#endif /* POSTACTION */ + +#endif /* USE_LOCKS */ + +/* + CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. + USAGE_ERROR_ACTION is triggered on detected bad frees and + reallocs. The argument p is an address that might have triggered the + fault. It is ignored by the two predefined actions, but might be + useful in custom actions that try to help diagnose errors. +*/ + +#if PROCEED_ON_ERROR + +/* A count of the number of corruption errors causing resets */ +int malloc_corruption_error_count; + +/* default corruption action */ +static void reset_on_error(mstate m); + +#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) +#define USAGE_ERROR_ACTION(m, p) + +#else /* PROCEED_ON_ERROR */ + +#ifndef CORRUPTION_ERROR_ACTION +#define CORRUPTION_ERROR_ACTION(m) ABORT +#endif /* CORRUPTION_ERROR_ACTION */ + +#ifndef USAGE_ERROR_ACTION +#define USAGE_ERROR_ACTION(m,p) ABORT +#endif /* USAGE_ERROR_ACTION */ + +#endif /* PROCEED_ON_ERROR */ + +/* -------------------------- Debugging setup ---------------------------- */ + +#if ! 
DEBUG + +#define check_free_chunk(M,P) +#define check_inuse_chunk(M,P) +#define check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) +#define check_malloc_state(M) +#define check_top_chunk(M,P) + +#else /* DEBUG */ +#define check_free_chunk(M,P) do_check_free_chunk(M,P) +#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) +#define check_top_chunk(M,P) do_check_top_chunk(M,P) +#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) +#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) +#define check_malloc_state(M) do_check_malloc_state(M) + +static void do_check_any_chunk(mstate m, mchunkptr p); +static void do_check_top_chunk(mstate m, mchunkptr p); +static void do_check_mmapped_chunk(mstate m, mchunkptr p); +static void do_check_inuse_chunk(mstate m, mchunkptr p); +static void do_check_free_chunk(mstate m, mchunkptr p); +static void do_check_malloced_chunk(mstate m, void* mem, size_t s); +static void do_check_tree(mstate m, tchunkptr t); +static void do_check_treebin(mstate m, bindex_t i); +static void do_check_smallbin(mstate m, bindex_t i); +static void do_check_malloc_state(mstate m); +static int bin_find(mstate m, mchunkptr x); +static size_t traverse_and_check(mstate m); +#endif /* DEBUG */ + +/* ---------------------------- Indexing Bins ---------------------------- */ + +#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) +#define small_index(s) ((s) >> SMALLBIN_SHIFT) +#define small_index2size(i) ((i) << SMALLBIN_SHIFT) +#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) + +/* addressing by index. See above about smallbin repositioning */ +#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) +#define treebin_at(M,i) (&((M)->treebins[i])) + +/* assign tree index for size S to variable I. Use x86 asm if possible */ +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) +#define compute_tree_index(S, I)\ +{\ + unsigned int X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + __asm__("bsrl\t%1, %0\n\t" : "=r" (K) : "rm" (X));\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#elif defined (__INTEL_COMPILER) +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K = _bit_scan_reverse (X); \ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#elif defined(_MSC_VER) && _MSC_VER>=1300 +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int K;\ + _BitScanReverse((DWORD *) &K, X);\ + I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ + }\ +} + +#else /* GNUC */ +#define compute_tree_index(S, I)\ +{\ + size_t X = S >> TREEBIN_SHIFT;\ + if (X == 0)\ + I = 0;\ + else if (X > 0xFFFF)\ + I = NTREEBINS-1;\ + else {\ + unsigned int Y = (unsigned int)X;\ + unsigned int N = ((Y - 0x100) >> 16) & 8;\ + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ + N += K;\ + N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ + K = 14 - N + ((Y <<= K) >> 15);\ + I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ + }\ +} +#endif /* GNUC */ + +/* Bit representing maximum resolved size in a treebin at i */ +#define bit_for_tree_index(i) \ + (i == NTREEBINS-1)? 
(SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define leftshift_for_tree_index(i) \
+   ((i == NTREEBINS-1)? 0 : \
+    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define minsize_for_tree_index(i) \
+   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
+   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
+
+
+/* ------------------------ Operations on bin maps ----------------------- */
+
+/* bit corresponding to given index */
+#define idx2bit(i)              ((binmap_t)(1) << (i))
+
+/* Mark/Clear bits with given index */
+#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
+#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
+#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
+
+#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
+#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
+#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
+
+/* isolate the least set bit of a bitmap */
+#define least_bit(x)         ((x) & -(x))
+
+/* mask with all bits to left of least bit of x on */
+#define left_bits(x)         ((x<<1) | -(x<<1))
+
+/* mask with all bits to left of or equal to least bit of x on */
+#define same_or_left_bits(x) ((x) | -(x))
+
+/* index corresponding to given bit. Use x86 asm if possible */
+
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  __asm__("bsfl\t%1, %0\n\t" : "=r" (J) : "rm" (X));\
+  I = (bindex_t)J;\
+}
+
+#elif defined (__INTEL_COMPILER)
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  J = _bit_scan_forward (X); \
+  I = (bindex_t)J;\
+}
+
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  _BitScanForward((DWORD *) &J, X);\
+  I = (bindex_t)J;\
+}
+
+#elif USE_BUILTIN_FFS
+#define compute_bit2idx(X, I) I = ffs(X)-1
+
+#else
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int Y = X - 1;\
+  unsigned int K = Y >> (16-4) & 16;\
+  unsigned int N = K;        Y >>= K;\
+  N += K = Y >> (8-3) &  8;  Y >>= K;\
+  N += K = Y >> (4-2) &  4;  Y >>= K;\
+  N += K = Y >> (2-1) &  2;  Y >>= K;\
+  N += K = Y >> (1-0) &  1;  Y >>= K;\
+  I = (bindex_t)(N + Y);\
+}
+#endif /* GNUC */
+
+
+/* ----------------------- Runtime Check Support ------------------------- */
+
+/*
+  For security, the main invariant is that malloc/free/etc never
+  writes to a static address other than malloc_state, unless static
+  malloc_state itself has been corrupted, which cannot occur via
+  malloc (because of these checks). In essence this means that we
+  believe all pointers, sizes, maps etc held in malloc_state, but
+  check all of those linked or offsetted from other embedded data
+  structures.  These checks are interspersed with main code in a way
+  that tends to minimize their run-time cost.
+
+  When FOOTERS is defined, in addition to range checking, we also
+  verify footer fields of inuse chunks, which can be used to guarantee
+  that the mstate controlling malloc/free is intact.  This is a
+  streamlined version of the approach described by William Robertson
+  et al in "Run-time Detection of Heap-based Overflows" LISA'03
+  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
+  of an inuse chunk holds the xor of its mstate and a random seed,
+  which is checked upon calls to free() and realloc(). 
This is
+  (probabilistically) unguessable from outside the program, but can be
+  computed by any code successfully malloc'ing any chunk, so does not
+  itself provide protection against code that has already broken
+  security through some other means.  Unlike Robertson et al, we
+  always dynamically check addresses of all offset chunks (previous,
+  next, etc). This turns out to be cheaper than relying on hashes.
+*/
+
+#if !INSECURE
+/* Check if address a is at least as high as any from MORECORE or MMAP */
+#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
+/* Check if address of next chunk n is higher than base chunk p */
+#define ok_next(p, n)    ((char*)(p) < (char*)(n))
+/* Check if p has its cinuse bit on */
+#define ok_cinuse(p)     cinuse(p)
+/* Check if p has its pinuse bit on */
+#define ok_pinuse(p)     pinuse(p)
+
+#else /* !INSECURE */
+#define ok_address(M, a) (1)
+#define ok_next(b, n)    (1)
+#define ok_cinuse(p)     (1)
+#define ok_pinuse(p)     (1)
+#endif /* !INSECURE */
+
+#if (FOOTERS && !INSECURE)
+/* Check if (alleged) mstate m has expected magic field */
+#define ok_magic(M)      ((M)->magic == mparams.magic)
+#else  /* (FOOTERS && !INSECURE) */
+#define ok_magic(M)      (1)
+#endif /* (FOOTERS && !INSECURE) */
+
+
+/* In gcc, use __builtin_expect to minimize impact of checks */
+#if !INSECURE
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define RTCHECK(e)  __builtin_expect(e, 1)
+#else /* GNUC */
+#define RTCHECK(e)  (e)
+#endif /* GNUC */
+#else /* !INSECURE */
+#define RTCHECK(e)  (1)
+#endif /* !INSECURE */
+
+/* macros to set up inuse chunks with or without footers */
+
+#if !FOOTERS
+
+#define mark_inuse_foot(M,p,s)
+
+/* Set cinuse bit and pinuse bit of next chunk */
+#define set_inuse(M,p,s)\
+  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
+#define set_inuse_and_pinuse(M,p,s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set size, cinuse and pinuse bit of this chunk */
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
+
+#else /* FOOTERS */
+
+/* Set foot of inuse chunk to be xor of mstate and seed */
+#define mark_inuse_foot(M,p,s)\
+  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
+
+#define get_mstate_for(p)\
+  ((mstate)(((mchunkptr)((char*)(p) +\
+    (chunksize(p))))->prev_foot ^ mparams.magic))
+
+#define set_inuse(M,p,s)\
+  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
+  mark_inuse_foot(M,p,s))
+
+#define set_inuse_and_pinuse(M,p,s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
+  mark_inuse_foot(M,p,s))
+
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+  mark_inuse_foot(M, p, s))
+
+#endif /* !FOOTERS */
+
+/* ---------------------------- setting mparams -------------------------- */
+
+/* Initialize mparams */
+static int init_mparams(void) {
+#ifdef NEED_GLOBAL_LOCK_INIT
+  if (malloc_global_mutex_status <= 0)
+    init_malloc_global_mutex();
+#endif
+
+  ACQUIRE_MALLOC_GLOBAL_LOCK();
+  if (mparams.magic == 0) {
+    size_t magic;
+    size_t psize;
+    size_t gsize;
+
+#ifndef WIN32
+    psize = malloc_getpagesize;
+    gsize = ((DEFAULT_GRANULARITY != 0)? 
DEFAULT_GRANULARITY : psize); +#else /* WIN32 */ + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + psize = system_info.dwPageSize; + gsize = ((DEFAULT_GRANULARITY != 0)? + DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + } +#endif /* WIN32 */ + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. + */ + if ((sizeof(size_t) != sizeof(char*)) || + (MAX_SIZE_T < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (MALLOC_ALIGNMENT < (size_t)8U) || + ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || + ((gsize & (gsize-SIZE_T_ONE)) != 0) || + ((psize & (psize-SIZE_T_ONE)) != 0)) + ABORT; + + mparams.granularity = gsize; + mparams.page_size = psize; + mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; + mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; +#if MORECORE_CONTIGUOUS + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; +#else /* MORECORE_CONTIGUOUS */ + mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; +#endif /* MORECORE_CONTIGUOUS */ + +#if !ONLY_MSPACES + /* Set up lock for main malloc area */ + gm->mflags = mparams.default_mflags; + INITIAL_LOCK(&gm->mutex); +#endif + +#if (FOOTERS && !INSECURE) + { +#if USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + /* Try to use /dev/urandom, else fall back on using time */ + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) { + magic = *((size_t *) buf); + close(fd); + } + else +#endif /* USE_DEV_RANDOM */ +#ifdef WIN32 + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); +#else + magic = (size_t)(time(0) ^ (size_t)0x55555555U); +#endif + magic |= (size_t)8U; /* ensure nonzero */ + magic &= ~(size_t)7U; /* improve chances of fault for bad values */ + } +#else /* (FOOTERS && !INSECURE) */ + magic = (size_t)0x58585858U; +#endif /* (FOOTERS && !INSECURE) */ + + mparams.magic = magic; + } + + RELEASE_MALLOC_GLOBAL_LOCK(); + return 1; +} + +/* support for mallopt */ +static int change_mparam(int param_number, int value) { + size_t val = (value == -1)? MAX_SIZE_T : (size_t)value; + ensure_initialization(); + switch(param_number) { + case M_TRIM_THRESHOLD: + mparams.trim_threshold = val; + return 1; + case M_GRANULARITY: + if (val >= mparams.page_size && ((val & (val-1)) == 0)) { + mparams.granularity = val; + return 1; + } + else + return 0; + case M_MMAP_THRESHOLD: + mparams.mmap_threshold = val; + return 1; + default: + return 0; + } +} + +#if DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +/* Check properties of any chunk, whether free, inuse, mmapped etc */ +static void do_check_any_chunk(mstate m, mchunkptr p) { + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); +} + +/* Check properties of top chunk */ +static void do_check_top_chunk(mstate m, mchunkptr p) { + msegmentptr sp = segment_holding(m, (char*)p); + size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! 
*/ + assert(sp != 0); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(sz == m->topsize); + assert(sz > 0); + assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); + assert(pinuse(p)); + assert(!pinuse(chunk_plus_offset(p, sz))); +} + +/* Check properties of (inuse) mmapped chunks */ +static void do_check_mmapped_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD); + assert(is_mmapped(p)); + assert(use_mmap(m)); + assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); + assert(ok_address(m, p)); + assert(!is_small(sz)); + assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); + assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); + assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); +} + +/* Check properties of inuse chunks */ +static void do_check_inuse_chunk(mstate m, mchunkptr p) { + do_check_any_chunk(m, p); + assert(cinuse(p)); + assert(next_pinuse(p)); + /* If not pinuse and not mmapped, previous chunk has OK offset */ + assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); + if (is_mmapped(p)) + do_check_mmapped_chunk(m, p); +} + +/* Check properties of free chunks */ +static void do_check_free_chunk(mstate m, mchunkptr p) { + size_t sz = chunksize(p); + mchunkptr next = chunk_plus_offset(p, sz); + do_check_any_chunk(m, p); + assert(!cinuse(p)); + assert(!next_pinuse(p)); + assert (!is_mmapped(p)); + if (p != m->dv && p != m->top) { + if (sz >= MIN_CHUNK_SIZE) { + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(is_aligned(chunk2mem(p))); + assert(next->prev_foot == sz); + assert(pinuse(p)); + assert (next == m->top || cinuse(next)); + assert(p->fd->bk == p); + assert(p->bk->fd == p); + } + else /* markers are always of size SIZE_T_SIZE */ + assert(sz == SIZE_T_SIZE); + } +} + +/* Check properties of malloced chunks at the point they are malloced */ +static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT); + do_check_inuse_chunk(m, p); + assert((sz & CHUNK_ALIGN_MASK) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ + assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); + } +} + +/* Check a tree and its subtrees. 
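+   Verifies, for each node on the same-size chain and recursively for
+   both children, the invariants described in the tree-bin overview
+   above: index/size agreement, fd/bk linkage, parent/child
+   consistency, and left-child sizes below right-child sizes.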
*/ +static void do_check_tree(mstate m, tchunkptr t) { + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->index; + size_t tsize = chunksize(t); + bindex_t idx; + compute_tree_index(tsize, idx); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); + + do { /* traverse through chain of same-sized nodes */ + do_check_any_chunk(m, ((mchunkptr)u)); + assert(u->index == tindex); + assert(chunksize(u) == tsize); + assert(!cinuse(u)); + assert(!next_pinuse(u)); + assert(u->fd->bk == u); + assert(u->bk->fd == u); + if (u->parent == 0) { + assert(u->child[0] == 0); + assert(u->child[1] == 0); + } + else { + assert(head == 0); /* only one node on chain has parent */ + head = u; + assert(u->parent != u); + assert (u->parent->child[0] == u || + u->parent->child[1] == u || + *((tbinptr*)(u->parent)) == u); + if (u->child[0] != 0) { + assert(u->child[0]->parent == u); + assert(u->child[0] != u); + do_check_tree(m, u->child[0]); + } + if (u->child[1] != 0) { + assert(u->child[1]->parent == u); + assert(u->child[1] != u); + do_check_tree(m, u->child[1]); + } + if (u->child[0] != 0 && u->child[1] != 0) { + assert(chunksize(u->child[0]) < chunksize(u->child[1])); + } + } + u = u->fd; + } while (u != t); + assert(head != 0); +} + +/* Check all the chunks in a treebin. */ +static void do_check_treebin(mstate m, bindex_t i) { + tbinptr* tb = treebin_at(m, i); + tchunkptr t = *tb; + int empty = (m->treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(m, t); +} + +/* Check all the chunks in a smallbin. */ +static void do_check_smallbin(mstate m, bindex_t i) { + sbinptr b = smallbin_at(m, i); + mchunkptr p = b->bk; + unsigned int empty = (m->smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) { + for (; p != b; p = p->bk) { + size_t size = chunksize(p); + mchunkptr q; + /* each chunk claims to be free */ + do_check_free_chunk(m, p); + /* chunk belongs in bin */ + assert(small_index(size) == i); + assert(p->bk == b || chunksize(p->bk) == chunksize(p)); + /* chunk is followed by an inuse chunk */ + q = next_chunk(p); + if (q->head != FENCEPOST_HEAD) + do_check_inuse_chunk(m, q); + } + } +} + +/* Find x in a bin. Used in other check functions. 
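+   Returns nonzero iff chunk x is currently linked into the smallbin
+   or treebin matching its size.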
*/ +static int bin_find(mstate m, mchunkptr x) { + size_t size = chunksize(x); + if (is_small(size)) { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(m, sidx); + if (smallmap_is_marked(m, sidx)) { + mchunkptr p = b; + do { + if (p == x) + return 1; + } while ((p = p->fd) != b); + } + } + else { + bindex_t tidx; + compute_tree_index(size, tidx); + if (treemap_is_marked(m, tidx)) { + tchunkptr t = *treebin_at(m, tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && chunksize(t) != size) { + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + sizebits <<= 1; + } + if (t != 0) { + tchunkptr u = t; + do { + if (u == (tchunkptr)x) + return 1; + } while ((u = u->fd) != t); + } + } + } + return 0; +} + +/* Traverse each chunk and check it; return total */ +static size_t traverse_and_check(mstate m) { + size_t sum = 0; + if (is_initialized(m)) { + msegmentptr s = &m->seg; + sum += m->topsize + TOP_FOOT_SIZE; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + mchunkptr lastq = 0; + assert(pinuse(q)); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + sum += chunksize(q); + if (cinuse(q)) { + assert(!bin_find(m, q)); + do_check_inuse_chunk(m, q); + } + else { + assert(q == m->dv || bin_find(m, q)); + assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */ + do_check_free_chunk(m, q); + } + lastq = q; + q = next_chunk(q); + } + s = s->next; + } + } + return sum; +} + +/* Check all properties of malloc_state. */ +static void do_check_malloc_state(mstate m) { + bindex_t i; + size_t total; + /* check bins */ + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(m, i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(m, i); + + if (m->dvsize != 0) { /* check dv chunk */ + do_check_any_chunk(m, m->dv); + assert(m->dvsize == chunksize(m->dv)); + assert(m->dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(m, m->dv) == 0); + } + + if (m->top != 0) { /* check top chunk */ + do_check_top_chunk(m, m->top); + /*assert(m->topsize == chunksize(m->top)); redundant */ + assert(m->topsize > 0); + assert(bin_find(m, m->top) == 0); + } + + total = traverse_and_check(m); + assert(total <= m->footprint); + assert(m->footprint <= m->max_footprint); +} +#endif /* DEBUG */ + +/* ----------------------------- statistics ------------------------------ */ + +#if !NO_MALLINFO +static struct mallinfo internal_mallinfo(mstate m) { + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + ensure_initialization(); + if (!PREACTION(m)) { + check_malloc_state(m); + if (is_initialized(m)) { + size_t nfree = SIZE_T_ONE; /* top always free */ + size_t mfree = m->topsize + TOP_FOOT_SIZE; + size_t sum = mfree; + msegmentptr s = &m->seg; + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + size_t sz = chunksize(q); + sum += sz; + if (!cinuse(q)) { + mfree += sz; + ++nfree; + } + q = next_chunk(q); + } + s = s->next; + } + + nm.arena = sum; + nm.ordblks = nfree; + nm.hblkhd = m->footprint - sum; + nm.usmblks = m->max_footprint; + nm.uordblks = m->footprint - mfree; + nm.fordblks = mfree; + nm.keepcost = m->topsize; + } + + POSTACTION(m); + } + return nm; +} +#endif /* !NO_MALLINFO */ + +static void internal_malloc_stats(mstate m) { + ensure_initialization(); + if (!PREACTION(m)) { + size_t maxfp = 0; + size_t fp = 0; + size_t used = 0; + check_malloc_state(m); + if (is_initialized(m)) { + msegmentptr s = &m->seg; + maxfp = m->max_footprint; + fp = 
m->footprint; + used = fp - (m->topsize + TOP_FOOT_SIZE); + + while (s != 0) { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && + q != m->top && q->head != FENCEPOST_HEAD) { + if (!cinuse(q)) + used -= chunksize(q); + q = next_chunk(q); + } + s = s->next; + } + } + + fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp)); + fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp)); + fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used)); + + POSTACTION(m); + } +} + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. +*/ + +/* Link a free chunk into a smallbin */ +#define insert_small_chunk(M, P, S) {\ + bindex_t I = small_index(S);\ + mchunkptr B = smallbin_at(M, I);\ + mchunkptr F = B;\ + assert(S >= MIN_CHUNK_SIZE);\ + if (!smallmap_is_marked(M, I))\ + mark_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, B->fd)))\ + F = B->fd;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + B->fd = P;\ + F->bk = P;\ + P->fd = F;\ + P->bk = B;\ +} + +/* Unlink a chunk from a smallbin */ +#define unlink_small_chunk(M, P, S) {\ + mchunkptr F = P->fd;\ + mchunkptr B = P->bk;\ + bindex_t I = small_index(S);\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (F == B)\ + clear_smallmap(M, I);\ + else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\ + (B == smallbin_at(M,I) || ok_address(M, B)))) {\ + F->bk = B;\ + B->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + +/* Unlink the first chunk from a smallbin */ +#define unlink_first_small_chunk(M, B, P, I) {\ + mchunkptr F = P->fd;\ + assert(P != B);\ + assert(P != F);\ + assert(chunksize(P) == small_index2size(I));\ + if (B == F)\ + clear_smallmap(M, I);\ + else if (RTCHECK(ok_address(M, F))) {\ + B->fd = F;\ + F->bk = B;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ +} + + + +/* Replace dv node, binning the old one */ +/* Used only when dvsize known to be small */ +#define replace_dv(M, P, S) {\ + size_t DVS = M->dvsize;\ + if (DVS != 0) {\ + mchunkptr DV = M->dv;\ + assert(is_small(DVS));\ + insert_small_chunk(M, DV, DVS);\ + }\ + M->dvsize = S;\ + M->dv = P;\ +} + +/* ------------------------- Operations on trees ------------------------- */ + +/* Insert chunk into tree */ +#define insert_large_chunk(M, X, S) {\ + tbinptr* H;\ + bindex_t I;\ + compute_tree_index(S, I);\ + H = treebin_at(M, I);\ + X->index = I;\ + X->child[0] = X->child[1] = 0;\ + if (!treemap_is_marked(M, I)) {\ + mark_treemap(M, I);\ + *H = X;\ + X->parent = (tchunkptr)H;\ + X->fd = X->bk = X;\ + }\ + else {\ + tchunkptr T = *H;\ + size_t K = S << leftshift_for_tree_index(I);\ + for (;;) {\ + if (chunksize(T) != S) {\ + tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ + K <<= 1;\ + if (*C != 0)\ + T = *C;\ + else if (RTCHECK(ok_address(M, C))) {\ + *C = X;\ + X->parent = T;\ + X->fd = X->bk = X;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + else {\ + tchunkptr F = T->fd;\ + if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ + T->fd = F->bk = X;\ + X->fd = F;\ + X->bk = T;\ + X->parent = 0;\ + break;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + break;\ + }\ + }\ + }\ + }\ +} + +/* + Unlink steps: + + 1. 
If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendants + correspond properly to bit masks. We use the rightmost descendant + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). +*/ + +#define unlink_large_chunk(M, X) {\ + tchunkptr XP = X->parent;\ + tchunkptr R;\ + if (X->bk != X) {\ + tchunkptr F = X->fd;\ + R = X->bk;\ + if (RTCHECK(ok_address(M, F))) {\ + F->bk = R;\ + R->fd = F;\ + }\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else {\ + tchunkptr* RP;\ + if (((R = *(RP = &(X->child[1]))) != 0) ||\ + ((R = *(RP = &(X->child[0]))) != 0)) {\ + tchunkptr* CP;\ + while ((*(CP = &(R->child[1])) != 0) ||\ + (*(CP = &(R->child[0])) != 0)) {\ + R = *(RP = CP);\ + }\ + if (RTCHECK(ok_address(M, RP)))\ + *RP = 0;\ + else {\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + }\ + if (XP != 0) {\ + tbinptr* H = treebin_at(M, X->index);\ + if (X == *H) {\ + if ((*H = R) == 0) \ + clear_treemap(M, X->index);\ + }\ + else if (RTCHECK(ok_address(M, XP))) {\ + if (XP->child[0] == X) \ + XP->child[0] = R;\ + else \ + XP->child[1] = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + if (R != 0) {\ + if (RTCHECK(ok_address(M, R))) {\ + tchunkptr C0, C1;\ + R->parent = XP;\ + if ((C0 = X->child[0]) != 0) {\ + if (RTCHECK(ok_address(M, C0))) {\ + R->child[0] = C0;\ + C0->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + if ((C1 = X->child[1]) != 0) {\ + if (RTCHECK(ok_address(M, C1))) {\ + R->child[1] = C1;\ + C1->parent = R;\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ + else\ + CORRUPTION_ERROR_ACTION(M);\ + }\ + }\ +} + +/* Relays to large vs small bin operations */ + +#define insert_chunk(M, P, S)\ + if (is_small(S)) insert_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } + +#define unlink_chunk(M, P, S)\ + if (is_small(S)) unlink_small_chunk(M, P, S)\ + else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } + + +/* Relays to internal calls to malloc/free from realloc, memalign etc */ + +#if ONLY_MSPACES +#define internal_malloc(m, b) mspace_malloc(m, b) +#define internal_free(m, mem) mspace_free(m,mem); +#else /* ONLY_MSPACES */ +#if MSPACES +#define internal_malloc(m, b)\ + (m == gm)? dlmalloc(b) : mspace_malloc(m, b) +#define internal_free(m, mem)\ + if (m == gm) dlfree(mem); else mspace_free(m,mem); +#else /* MSPACES */ +#define internal_malloc(m, b) dlmalloc(b) +#define internal_free(m, mem) dlfree(mem) +#endif /* MSPACES */ +#endif /* ONLY_MSPACES */ + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +/* + Directly mmapped chunks are set up with an offset to the start of + the mmapped region stored in the prev_foot field of the chunk. This + allows reconstruction of the required argument to MUNMAP when freed, + and also allows adjustment of the returned chunk to meet alignment + requirements (especially in memalign). 
There is also enough space + allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain + the PINUSE bit so frees can be checked. +*/ + +/* Malloc using mmap */ +static void* mmap_alloc(mstate m, size_t nb) { + size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + if (mmsize > nb) { /* Check for wrap around 0 */ + char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); + if (mm != CMFAIL) { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->prev_foot = offset | IS_MMAPPED_BIT; + (p)->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, p, psize); + chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; + + if (mm < m->least_addr) + m->least_addr = mm; + if ((m->footprint += mmsize) > m->max_footprint) + m->max_footprint = m->footprint; + assert(is_aligned(chunk2mem(p))); + check_mmapped_chunk(m, p); + return chunk2mem(p); + } + } + return 0; +} + +/* Realloc using mmap */ +static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) { + size_t oldsize = chunksize(oldp); + if (is_small(nb)) /* Can't shrink mmap regions below small size */ + return 0; + /* Keep old chunk if big enough but not too big */ + if (oldsize >= nb + SIZE_T_SIZE && + (oldsize - nb) <= (mparams.granularity << 1)) + return oldp; + else { + size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT; + size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; + size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + char* cp = (char*)CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, 1); + if (cp != CMFAIL) { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - MMAP_FOOT_PAD; + newp->head = (psize|CINUSE_BIT); + mark_inuse_foot(m, newp, psize); + chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; + chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; + + if (cp < m->least_addr) + m->least_addr = cp; + if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) + m->max_footprint = m->footprint; + check_mmapped_chunk(m, newp); + return newp; + } + } + return 0; +} + +/* -------------------------- mspace management -------------------------- */ + +/* Initialize top chunk and its size */ +static void init_top(mstate m, mchunkptr p, size_t psize) { + /* Ensure alignment */ + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + m->top = p; + m->topsize = psize; + p->head = psize | PINUSE_BIT; + /* set size of fake trailing chunk holding overhead space only once */ + chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; + m->trim_check = mparams.trim_threshold; /* reset on each update */ +} + +/* Initialize bins for a new mstate that is otherwise zeroed out */ +static void init_bins(mstate m) { + /* Establish circular links for smallbins */ + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) { + sbinptr bin = smallbin_at(m,i); + bin->fd = bin->bk = bin; + } +} + +#if PROCEED_ON_ERROR + +/* default corruption action */ +static void reset_on_error(mstate m) { + int i; + ++malloc_corruption_error_count; + /* Reinitialize fields to forget about all memory */ + m->smallbins = m->treebins = 0; + m->dvsize = m->topsize = 0; + m->seg.base = 0; + m->seg.size = 0; + m->seg.next = 0; + m->top = m->dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(m, i) = 0; + init_bins(m); +} +#endif /* PROCEED_ON_ERROR */ + +/* Allocate chunk and prepend remainder with chunk in successor base. 
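+   Called from sys_alloc when newly obtained memory ends exactly where
+   an existing segment begins: nb bytes are carved out at newbase, and
+   the leftover space in front of the old segment is merged into that
+   segment's first chunk (or into top/dv when the first chunk is top
+   or dv).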
*/ +static void* prepend_alloc(mstate m, char* newbase, char* oldbase, + size_t nb) { + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = chunk_plus_offset(p, nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + + assert((char*)oldfirst > (char*)q); + assert(pinuse(oldfirst)); + assert(qsize >= MIN_CHUNK_SIZE); + + /* consolidate remainder with first chunk of old base */ + if (oldfirst == m->top) { + size_t tsize = m->topsize += qsize; + m->top = q; + q->head = tsize | PINUSE_BIT; + check_top_chunk(m, q); + } + else if (oldfirst == m->dv) { + size_t dsize = m->dvsize += qsize; + m->dv = q; + set_size_and_pinuse_of_free_chunk(q, dsize); + } + else { + if (!cinuse(oldfirst)) { + size_t nsize = chunksize(oldfirst); + unlink_chunk(m, oldfirst, nsize); + oldfirst = chunk_plus_offset(oldfirst, nsize); + qsize += nsize; + } + set_free_with_pinuse(q, qsize, oldfirst); + insert_chunk(m, q, qsize); + check_free_chunk(m, q); + } + + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); +} + +/* Add a segment to hold a new noncontiguous region */ +static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { + /* Determine locations and sizes of segment, fenceposts, old top */ + char* old_top = (char*)m->top; + msegmentptr oldsp = segment_holding(m, old_top); + char* old_end = oldsp->base + oldsp->size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = chunk_plus_offset(sp, ssize); + mchunkptr p = tnext; + int nfences = 0; + + /* reset top to new space */ + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + + /* Set up segment record */ + assert(is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); + *ss = m->seg; /* Push current record */ + m->seg.base = tbase; + m->seg.size = tsize; + m->seg.sflags = mmapped; + m->seg.next = ss; + + /* Insert trailing fenceposts */ + for (;;) { + mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); + p->head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + /* Insert the rest of old top into a bin as an ordinary free chunk */ + if (csp != old_top) { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = chunk_plus_offset(q, psize); + set_free_with_pinuse(q, psize, tn); + insert_chunk(m, q, psize); + } + + check_top_chunk(m, m->top); +} + +/* -------------------------- System allocation -------------------------- */ + +/* Get memory from system using MORECORE or MMAP */ +static void* sys_alloc(mstate m, size_t nb) { + char* tbase = CMFAIL; + size_t tsize = 0; + flag_t mmap_flag = 0; + + ensure_initialization(); + + /* Directly map large chunks */ + if (use_mmap(m) && nb >= mparams.mmap_threshold) { + void* mem = mmap_alloc(m, nb); + if (mem != 0) + return mem; + } + + /* + Try getting memory in any of three ways (in most-preferred to + least-preferred order): + 1. A call to MORECORE that can normally contiguously extend memory. + (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or + main space is mmapped or a previous contiguous call failed) + 2. 
A call to MMAP new space (disabled if not HAVE_MMAP). + Note that under the default settings, if MORECORE is unable to + fulfill a request, and HAVE_MMAP is true, then mmap is + used as a noncontiguous system allocator. This is a useful backup + strategy for systems with holes in address spaces -- in this case + sbrk cannot contiguously expand the heap, but mmap may be able to + find space. + 3. A call to MORECORE that cannot usually contiguously extend memory. + (disabled if not HAVE_MORECORE) + + In all cases, we need to request enough bytes from system to ensure + we can malloc nb bytes upon success, so pad with enough space for + top_foot, plus alignment-pad to make sure we don't lose bytes if + not on boundary, and round this up to a granularity unit. + */ + + if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { + char* br = CMFAIL; + msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); + size_t asize = 0; + ACQUIRE_MALLOC_GLOBAL_LOCK(); + + if (ss == 0) { /* First time through or recovery */ + char* base = (char*)CALL_MORECORE(0); + if (base != CMFAIL) { + asize = granularity_align(nb + SYS_ALLOC_PADDING); + /* Adjust to end on a page boundary */ + if (!is_page_aligned(base)) + asize += (page_align((size_t)base) - (size_t)base); + /* Can't call MORECORE if size is negative when treated as signed */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == base) { + tbase = base; + tsize = asize; + } + } + } + else { + /* Subtract out existing available top space from MORECORE request. */ + asize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); + /* Use mem here only if it did continuously extend old space */ + if (asize < HALF_MAX_SIZE_T && + (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) { + tbase = br; + tsize = asize; + } + } + + if (tbase == CMFAIL) { /* Cope with partial failure */ + if (br != CMFAIL) { /* Try to use/extend the space we did get */ + if (asize < HALF_MAX_SIZE_T && + asize < nb + SYS_ALLOC_PADDING) { + size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - asize); + if (esize < HALF_MAX_SIZE_T) { + char* end = (char*)CALL_MORECORE(esize); + if (end != CMFAIL) + asize += esize; + else { /* Can't use; try to release */ + (void) CALL_MORECORE(-asize); + br = CMFAIL; + } + } + } + } + if (br != CMFAIL) { /* Use the space we did get */ + tbase = br; + tsize = asize; + } + else + disable_contiguous(m); /* Don't try contiguous path in the future */ + } + + RELEASE_MALLOC_GLOBAL_LOCK(); + } + + if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ + size_t rsize = granularity_align(nb + SYS_ALLOC_PADDING); + if (rsize > nb) { /* Fail if wraps around zero */ + char* mp = (char*)(CALL_MMAP(rsize)); + if (mp != CMFAIL) { + tbase = mp; + tsize = rsize; + mmap_flag = IS_MMAPPED_BIT; + } + } + } + + if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ + size_t asize = granularity_align(nb + SYS_ALLOC_PADDING); + if (asize < HALF_MAX_SIZE_T) { + char* br = CMFAIL; + char* end = CMFAIL; + ACQUIRE_MALLOC_GLOBAL_LOCK(); + br = (char*)(CALL_MORECORE(asize)); + end = (char*)(CALL_MORECORE(0)); + RELEASE_MALLOC_GLOBAL_LOCK(); + if (br != CMFAIL && end != CMFAIL && br < end) { + size_t ssize = end - br; + if (ssize > nb + TOP_FOOT_SIZE) { + tbase = br; + tsize = ssize; + } + } + } + } + + if (tbase != CMFAIL) { + + if ((m->footprint += tsize) > m->max_footprint) + m->max_footprint = m->footprint; + + if (!is_initialized(m)) { /* first-time initialization */ + m->seg.base = m->least_addr = tbase; + m->seg.size = 
tsize; + m->seg.sflags = mmap_flag; + m->magic = mparams.magic; + m->release_checks = MAX_RELEASE_CHECK_RATE; + init_bins(m); +#if !ONLY_MSPACES + if (is_global(m)) + init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); + else +#endif + { + /* Offset top by embedded malloc_state */ + mchunkptr mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE); + } + } + + else { + /* Try to merge with an existing segment */ + msegmentptr sp = &m->seg; + /* Only consider most recent segment if traversal suppressed */ + while (sp != 0 && tbase != sp->base + sp->size) + sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && + segment_holds(sp, m->top)) { /* append */ + sp->size += tsize; + init_top(m, m->top, m->topsize + tsize); + } + else { + if (tbase < m->least_addr) + m->least_addr = tbase; + sp = &m->seg; + while (sp != 0 && sp->base != tbase + tsize) + sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; + if (sp != 0 && + !is_extern_segment(sp) && + (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) { + char* oldbase = sp->base; + sp->base = tbase; + sp->size += tsize; + return prepend_alloc(m, tbase, oldbase, nb); + } + else + add_segment(m, tbase, tsize, mmap_flag); + } + } + + if (nb < m->topsize) { /* Allocate from new or extended top space */ + size_t rsize = m->topsize -= nb; + mchunkptr p = m->top; + mchunkptr r = m->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(m, p, nb); + check_top_chunk(m, m->top); + check_malloced_chunk(m, chunk2mem(p), nb); + return chunk2mem(p); + } + } + + MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +/* Unmap and unlink any mmapped segments that don't contain used chunks */ +static size_t release_unused_segments(mstate m) { + size_t released = 0; + int nsegs = 0; + msegmentptr pred = &m->seg; + msegmentptr sp = pred->next; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + msegmentptr next = sp->next; + ++nsegs; + if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { + mchunkptr p = align_as_chunk(base); + size_t psize = chunksize(p); + /* Can unmap if first chunk holds entire segment and not pinned */ + if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, (char*)sp)); + if (p == m->dv) { + m->dv = 0; + m->dvsize = 0; + } + else { + unlink_large_chunk(m, tp); + } + if (CALL_MUNMAP(base, size) == 0) { + released += size; + m->footprint -= size; + /* unlink obsoleted record */ + sp = pred; + sp->next = next; + } + else { /* back out if cannot unmap */ + insert_large_chunk(m, tp, psize); + } + } + } + if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ + break; + pred = sp; + sp = next; + } + /* Reset check counter */ + m->release_checks = ((nsegs > MAX_RELEASE_CHECK_RATE)? 
+ nsegs : MAX_RELEASE_CHECK_RATE); + return released; +} + +static int sys_trim(mstate m, size_t pad) { + size_t released = 0; + ensure_initialization(); + if (pad < MAX_REQUEST && is_initialized(m)) { + pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ + + if (m->topsize > pad) { + /* Shrink top space in granularity-size units, keeping at least one */ + size_t unit = mparams.granularity; + size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - + SIZE_T_ONE) * unit; + msegmentptr sp = segment_holding(m, (char*)m->top); + + if (!is_extern_segment(sp)) { + if (is_mmapped_segment(sp)) { + if (HAVE_MMAP && + sp->size >= extra && + !has_segment_link(m, sp)) { /* can't shrink if pinned */ + size_t newsize = sp->size - extra; + /* Prefer mremap, fall back to munmap */ + if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || + (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { + released = extra; + } + } + } + else if (HAVE_MORECORE) { + if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ + extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; + ACQUIRE_MALLOC_GLOBAL_LOCK(); + { + /* Make sure end of memory is where we last set it. */ + char* old_br = (char*)(CALL_MORECORE(0)); + if (old_br == sp->base + sp->size) { + char* rel_br = (char*)(CALL_MORECORE(-extra)); + char* new_br = (char*)(CALL_MORECORE(0)); + if (rel_br != CMFAIL && new_br < old_br) + released = old_br - new_br; + } + } + RELEASE_MALLOC_GLOBAL_LOCK(); + } + } + + if (released != 0) { + sp->size -= released; + m->footprint -= released; + init_top(m, m->top, m->topsize - released); + check_top_chunk(m, m->top); + } + } + + /* Unmap any unused mmapped segments */ + if (HAVE_MMAP) + released += release_unused_segments(m); + + /* On failure, disable autotrim to avoid repeated failed future calls */ + if (released == 0 && m->topsize > m->trim_check) + m->trim_check = MAX_SIZE_T; + } + + return (released != 0)? 
1 : 0; +} + + +/* ---------------------------- malloc support --------------------------- */ + +/* allocate a large request from the best fitting chunk in a treebin */ +static void* tmalloc_large(mstate m, size_t nb) { + tchunkptr v = 0; + size_t rsize = -nb; /* Unsigned negation */ + tchunkptr t; + bindex_t idx; + compute_tree_index(nb, idx); + if ((t = *treebin_at(m, idx)) != 0) { + /* Traverse tree for this bin looking for node with size == nb */ + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; /* The deepest untaken right subtree */ + for (;;) { + tchunkptr rt; + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->child[1]; + t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) { + t = rst; /* set t to least subtree holding sizes > nb */ + break; + } + sizebits <<= 1; + } + } + if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ + binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; + if (leftbits != 0) { + bindex_t i; + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + t = *treebin_at(m, i); + } + } + + while (t != 0) { /* find smallest of tree or subtree */ + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + t = leftmost_child(t); + } + + /* If dv is a better fit, return 0 so malloc will use it */ + if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { + if (RTCHECK(ok_address(m, v))) { /* split */ + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + insert_chunk(m, r, rsize); + } + return chunk2mem(v); + } + } + CORRUPTION_ERROR_ACTION(m); + } + return 0; +} + +/* allocate a small request from the best fitting chunk in a treebin */ +static void* tmalloc_small(mstate m, size_t nb) { + tchunkptr t, v; + size_t rsize; + bindex_t i; + binmap_t leastbit = least_bit(m->treemap); + compute_bit2idx(leastbit, i); + v = t = *treebin_at(m, i); + rsize = chunksize(t) - nb; + + while ((t = leftmost_child(t)) != 0) { + size_t trem = chunksize(t) - nb; + if (trem < rsize) { + rsize = trem; + v = t; + } + } + + if (RTCHECK(ok_address(m, v))) { + mchunkptr r = chunk_plus_offset(v, nb); + assert(chunksize(v) == rsize + nb); + if (RTCHECK(ok_next(v, r))) { + unlink_large_chunk(m, v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(m, v, (rsize + nb)); + else { + set_size_and_pinuse_of_inuse_chunk(m, v, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(m, r, rsize); + } + return chunk2mem(v); + } + } + + CORRUPTION_ERROR_ACTION(m); + return 0; +} + +/* --------------------------- realloc support --------------------------- */ + +static void* internal_realloc(mstate m, void* oldmem, size_t bytes) { + if (bytes >= MAX_REQUEST) { + MALLOC_FAILURE_ACTION; + return 0; + } + if (!PREACTION(m)) { + mchunkptr oldp = mem2chunk(oldmem); + size_t oldsize = chunksize(oldp); + mchunkptr next = chunk_plus_offset(oldp, oldsize); + mchunkptr newp = 0; + void* extra = 0; + + /* Try to either shrink or extend into top. 
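+       (Shrinking in place splits off the tail as a separate in-use
+       chunk and releases it through internal_free once the lock is
+       dropped; extending is attempted only when the chunk directly
+       borders top.)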
Else malloc-copy-free */ + + if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) && + ok_next(oldp, next) && ok_pinuse(next))) { + size_t nb = request2size(bytes); + if (is_mmapped(oldp)) + newp = mmap_resize(m, oldp, nb); + else if (oldsize >= nb) { /* already big enough */ + size_t rsize = oldsize - nb; + newp = oldp; + if (rsize >= MIN_CHUNK_SIZE) { + mchunkptr remainder = chunk_plus_offset(newp, nb); + set_inuse(m, newp, nb); + set_inuse(m, remainder, rsize); + extra = chunk2mem(remainder); + } + } + else if (next == m->top && oldsize + m->topsize > nb) { + /* Expand into top */ + size_t newsize = oldsize + m->topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = chunk_plus_offset(oldp, nb); + set_inuse(m, oldp, nb); + newtop->head = newtopsize |PINUSE_BIT; + m->top = newtop; + m->topsize = newtopsize; + newp = oldp; + } + } + else { + USAGE_ERROR_ACTION(m, oldmem); + POSTACTION(m); + return 0; + } + + POSTACTION(m); + + if (newp != 0) { + if (extra != 0) { + internal_free(m, extra); + } + check_inuse_chunk(m, newp); + return chunk2mem(newp); + } + else { + void* newmem = internal_malloc(m, bytes); + if (newmem != 0) { + size_t oc = oldsize - overhead_for(oldp); + memcpy(newmem, oldmem, (oc < bytes)? oc : bytes); + internal_free(m, oldmem); + } + return newmem; + } + } + return 0; +} + +/* --------------------------- memalign support -------------------------- */ + +static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { + if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */ + return internal_malloc(m, bytes); + if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ + size_t a = MALLOC_ALIGNMENT << 1; + while (a < alignment) a <<= 1; + alignment = a; + } + + if (bytes >= MAX_REQUEST - alignment) { + if (m != 0) { /* Test isn't needed but avoids compiler warning */ + MALLOC_FAILURE_ACTION; + } + } + else { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + char* mem = (char*)internal_malloc(m, req); + if (mem != 0) { + void* leader = 0; + void* trailer = 0; + mchunkptr p = mem2chunk(mem); + + if (PREACTION(m)) return 0; + if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */ + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. + */ + char* br = (char*)mem2chunk((size_t)(((size_t)(mem + + alignment - + SIZE_T_ONE)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? 
+ br : br+alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = chunksize(p) - leadsize; + + if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ + newp->prev_foot = p->prev_foot + leadsize; + newp->head = (newsize|CINUSE_BIT); + } + else { /* Otherwise, give back leader, use the rest */ + set_inuse(m, newp, newsize); + set_inuse(m, p, leadsize); + leader = chunk2mem(p); + } + p = newp; + } + + /* Give back spare room at the end */ + if (!is_mmapped(p)) { + size_t size = chunksize(p); + if (size > nb + MIN_CHUNK_SIZE) { + size_t remainder_size = size - nb; + mchunkptr remainder = chunk_plus_offset(p, nb); + set_inuse(m, p, nb); + set_inuse(m, remainder, remainder_size); + trailer = chunk2mem(remainder); + } + } + + assert (chunksize(p) >= nb); + assert((((size_t)(chunk2mem(p))) % alignment) == 0); + check_inuse_chunk(m, p); + POSTACTION(m); + if (leader != 0) { + internal_free(m, leader); + } + if (trailer != 0) { + internal_free(m, trailer); + } + return chunk2mem(p); + } + } + return 0; +} + +/* ------------------------ comalloc/coalloc support --------------------- */ + +static void** ialloc(mstate m, + size_t n_elements, + size_t* sizes, + int opts, + void* chunks[]) { + /* + This provides common support for independent_X routines, handling + all of the combinations that can result. + + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed + */ + + size_t element_size; /* chunksize of each element, if all same */ + size_t contents_size; /* total size of elements */ + size_t array_size; /* request size of pointer array */ + void* mem; /* malloced aggregate space */ + mchunkptr p; /* corresponding chunk */ + size_t remainder_size; /* remaining bytes while splitting */ + void** marray; /* either "chunks" or malloced ptr array */ + mchunkptr array_chunk; /* chunk for malloced ptr array */ + flag_t was_enabled; /* to disable mmap */ + size_t size; + size_t i; + + ensure_initialization(); + /* compute array length, if needed */ + if (chunks != 0) { + if (n_elements == 0) + return chunks; /* nothing to do */ + marray = chunks; + array_size = 0; + } + else { + /* if empty req, must still return chunk representing empty array */ + if (n_elements == 0) + return (void**)internal_malloc(m, 0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + /* compute total element size */ + if (opts & 0x1) { /* all-same-size */ + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else { /* add up all the sizes */ + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
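+
+    A hedged usage sketch of the comalloc entry point built on this
+    (the three element sizes are hypothetical):
+
+      size_t sizes[3] = { 16, 24, 40 };
+      void* ptrs[3];
+      if (dlindependent_comalloc(3, sizes, ptrs) != 0) {
+        // all three objects share one aggregate chunk, yet each
+        // ptrs[i] may later be passed to dlfree individually
+      }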
+ */ + was_enabled = use_mmap(m); + disable_mmap(m); + mem = internal_malloc(m, size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(m); + if (mem == 0) + return 0; + + if (PREACTION(m)) return 0; + p = mem2chunk(mem); + remainder_size = chunksize(p); + + assert(!is_mmapped(p)); + + if (opts & 0x2) { /* optionally clear the elements */ + memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); + } + + /* If not provided, allocate the pointer array as final part of chunk */ + if (marray == 0) { + size_t array_chunk_size; + array_chunk = chunk_plus_offset(p, contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**) (chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + /* split out elements */ + for (i = 0; ; ++i) { + marray[i] = chunk2mem(p); + if (i != n_elements-1) { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(m, p, size); + p = chunk_plus_offset(p, size); + } + else { /* the final element absorbs any overallocation slop */ + set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); + break; + } + } + +#if DEBUG + if (marray != chunks) { + /* final element must have exactly exhausted chunk */ + if (element_size != 0) { + assert(remainder_size == element_size); + } + else { + assert(remainder_size == request2size(sizes[i])); + } + check_inuse_chunk(m, mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(m, mem2chunk(marray[i])); + +#endif /* DEBUG */ + + POSTACTION(m); + return marray; +} + + +/* -------------------------- public routines ---------------------------- */ + +#if !ONLY_MSPACES + +void* dlmalloc(size_t bytes) { + /* + Basic algorithm: + If a small request (< 256 bytes minus per-chunk overhead): + 1. If one exists, use a remainderless chunk in associated smallbin. + (Remainderless means that there are too few excess bytes to + represent as a chunk.) + 2. If it is big enough, use the dv chunk, which is normally the + chunk adjacent to the one used for the most recent small request. + 3. If one exists, split the smallest available chunk in a bin, + saving remainder in dv. + 4. If it is big enough, use the top chunk. + 5. If available, get memory from system and use it + Otherwise, for a large request: + 1. Find the smallest available binned chunk that fits, and use it + if it is better fitting than dv chunk, splitting if necessary. + 2. If better fitting than any binned chunk, use the dv chunk. + 3. If it is big enough, use the top chunk. + 4. If request size >= mmap threshold, try to directly mmap this chunk. + 5. If available, get memory from system and use it + + The ugly goto's here ensure that postaction occurs along all paths. + */ + +#if USE_LOCKS + ensure_initialization(); /* initialize in sys_alloc if not using locks */ +#endif + + if (!PREACTION(gm)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = gm->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. 
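+         (The low two bits of smallbits test bins idx and idx+1; nb is
+         already padded to a bin size, so a chunk from either bin leaves
+         too few excess bytes to split off as a remainder.)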
*/ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(gm, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(gm, b, p, idx); + set_inuse_and_pinuse(gm, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb > gm->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(gm, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(gm, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(gm, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(gm, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + } + + if (nb <= gm->dvsize) { + size_t rsize = gm->dvsize - nb; + mchunkptr p = gm->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = gm->dv = chunk_plus_offset(p, nb); + gm->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + } + else { /* exhaust dv */ + size_t dvs = gm->dvsize; + gm->dvsize = 0; + gm->dv = 0; + set_inuse_and_pinuse(gm, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + else if (nb < gm->topsize) { /* Split top */ + size_t rsize = gm->topsize -= nb; + mchunkptr p = gm->top; + mchunkptr r = gm->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(gm, p, nb); + mem = chunk2mem(p); + check_top_chunk(gm, gm->top); + check_malloced_chunk(gm, mem, nb); + goto postaction; + } + + mem = sys_alloc(gm, nb); + + postaction: + POSTACTION(gm); + return mem; + } + + return 0; +} + +void dlfree(void* mem) { + /* + Consolidate freed chunks with preceding or succeeding bordering + free chunks, if they exist, and then place in a bin. Intermixed + with special cases for top, dv, mmapped chunks, and usage errors. 
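+    Consolidation runs backward first (when the freed chunk's PINUSE
+    bit is clear, prev_foot locates the free predecessor), then
+    forward; a chunk that ends up bordering top is absorbed into top
+    and may trigger sys_trim rather than being binned.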
+ */ + + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } +#else /* FOOTERS */ +#define fm gm +#endif /* FOOTERS */ + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + check_free_chunk(fm, p); + } + else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + check_free_chunk(fm, p); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +#if !FOOTERS +#undef fm +#endif /* FOOTERS */ +} + +void* dlcalloc(size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = dlmalloc(req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* dlrealloc(void* oldmem, size_t bytes) { + if (oldmem == 0) + return dlmalloc(bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + dlfree(oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if ! 
FOOTERS + mstate m = gm; +#else /* FOOTERS */ + mstate m = get_mstate_for(mem2chunk(oldmem)); + if (!ok_magic(m)) { + USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif /* FOOTERS */ + return internal_realloc(m, oldmem, bytes); + } +} + +void* dlmemalign(size_t alignment, size_t bytes) { + return internal_memalign(gm, alignment, bytes); +} + +void** dlindependent_calloc(size_t n_elements, size_t elem_size, + void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + return ialloc(gm, n_elements, &sz, 3, chunks); +} + +void** dlindependent_comalloc(size_t n_elements, size_t sizes[], + void* chunks[]) { + return ialloc(gm, n_elements, sizes, 0, chunks); +} + +void* dlvalloc(size_t bytes) { + size_t pagesz; + ensure_initialization(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, bytes); +} + +void* dlpvalloc(size_t bytes) { + size_t pagesz; + ensure_initialization(); + pagesz = mparams.page_size; + return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); +} + +int dlmalloc_trim(size_t pad) { + ensure_initialization(); + int result = 0; + if (!PREACTION(gm)) { + result = sys_trim(gm, pad); + POSTACTION(gm); + } + return result; +} + +size_t dlmalloc_footprint(void) { + return gm->footprint; +} + +size_t dlmalloc_max_footprint(void) { + return gm->max_footprint; +} + +#if !NO_MALLINFO +struct mallinfo dlmallinfo(void) { + return internal_mallinfo(gm); +} +#endif /* NO_MALLINFO */ + +void dlmalloc_stats() { + internal_malloc_stats(gm); +} + +int dlmallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* !ONLY_MSPACES */ + +size_t dlmalloc_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (cinuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +/* ----------------------------- user mspaces ---------------------------- */ + +#if MSPACES + +static mstate init_user_mstate(char* tbase, size_t tsize) { + size_t msize = pad_request(sizeof(struct malloc_state)); + mchunkptr mn; + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + INITIAL_LOCK(&m->mutex); + msp->head = (msize|PINUSE_BIT|CINUSE_BIT); + m->seg.base = m->least_addr = tbase; + m->seg.size = m->footprint = m->max_footprint = tsize; + m->magic = mparams.magic; + m->release_checks = MAX_RELEASE_CHECK_RATE; + m->mflags = mparams.default_mflags; + m->extp = 0; + m->exts = 0; + disable_contiguous(m); + init_bins(m); + mn = next_chunk(mem2chunk(m)); + init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); + check_top_chunk(m, m->top); + return m; +} + +mspace create_mspace(size_t capacity, int locked) { + mstate m = 0; + size_t msize; + ensure_initialization(); + msize = pad_request(sizeof(struct malloc_state)); + if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + size_t rs = ((capacity == 0)? 
mparams.granularity : + (capacity + TOP_FOOT_SIZE + msize)); + size_t tsize = granularity_align(rs); + char* tbase = (char*)(CALL_MMAP(tsize)); + if (tbase != CMFAIL) { + m = init_user_mstate(tbase, tsize); + m->seg.sflags = IS_MMAPPED_BIT; + set_lock(m, locked); + } + } + return (mspace)m; +} + +mspace create_mspace_with_base(void* base, size_t capacity, int locked) { + mstate m = 0; + size_t msize; + ensure_initialization(); + msize = pad_request(sizeof(struct malloc_state)); + if (capacity > msize + TOP_FOOT_SIZE && + capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { + m = init_user_mstate((char*)base, capacity); + m->seg.sflags = EXTERN_BIT; + set_lock(m, locked); + } + return (mspace)m; +} + +int mspace_mmap_large_chunks(mspace msp, int enable) { + int ret = 0; + mstate ms = (mstate)msp; + if (!PREACTION(ms)) { + if (use_mmap(ms)) + ret = 1; + if (enable) + enable_mmap(ms); + else + disable_mmap(ms); + POSTACTION(ms); + } + return ret; +} + +size_t destroy_mspace(mspace msp) { + size_t freed = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + msegmentptr sp = &ms->seg; + while (sp != 0) { + char* base = sp->base; + size_t size = sp->size; + flag_t flag = sp->sflags; + sp = sp->next; + if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) && + CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return freed; +} + +/* + mspace versions of routines are near-clones of the global + versions. This is not so nice but better than the alternatives. +*/ + + +void* mspace_malloc(mspace msp, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (!PREACTION(ms)) { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = ms->smallmap >> idx; + + if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ + mchunkptr b, p; + idx += ~smallbits & 1; /* Uses next bin if idx empty */ + b = smallbin_at(ms, idx); + p = b->fd; + assert(chunksize(p) == small_index2size(idx)); + unlink_first_small_chunk(ms, b, p, idx); + set_inuse_and_pinuse(ms, p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb > ms->dvsize) { + if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ + mchunkptr b, p, r; + size_t rsize; + bindex_t i; + binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + compute_bit2idx(leastbit, i); + b = smallbin_at(ms, i); + p = b->fd; + assert(chunksize(p) == small_index2size(i)); + unlink_first_small_chunk(ms, b, p, i); + rsize = small_index2size(i) - nb; + /* Fit here cannot be remainderless if 4byte sizes */ + if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(ms, p, small_index2size(i)); + else { + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + r = chunk_plus_offset(p, nb); + set_size_and_pinuse_of_free_chunk(r, rsize); + replace_dv(ms, r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = MAX_SIZE_T; /* Too big to allocate. 
Force failure (in sys alloc) */ + else { + nb = pad_request(bytes); + if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + } + + if (nb <= ms->dvsize) { + size_t rsize = ms->dvsize - nb; + mchunkptr p = ms->dv; + if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ + mchunkptr r = ms->dv = chunk_plus_offset(p, nb); + ms->dvsize = rsize; + set_size_and_pinuse_of_free_chunk(r, rsize); + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + } + else { /* exhaust dv */ + size_t dvs = ms->dvsize; + ms->dvsize = 0; + ms->dv = 0; + set_inuse_and_pinuse(ms, p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + else if (nb < ms->topsize) { /* Split top */ + size_t rsize = ms->topsize -= nb; + mchunkptr p = ms->top; + mchunkptr r = ms->top = chunk_plus_offset(p, nb); + r->head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(ms, p, nb); + mem = chunk2mem(p); + check_top_chunk(ms, ms->top); + check_malloced_chunk(ms, mem, nb); + goto postaction; + } + + mem = sys_alloc(ms, nb); + + postaction: + POSTACTION(ms); + return mem; + } + + return 0; +} + +void mspace_free(mspace msp, void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); +#if FOOTERS + mstate fm = get_mstate_for(p); +#else /* FOOTERS */ + mstate fm = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + if (!PREACTION(fm)) { + check_inuse_chunk(fm, p); + if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) { + size_t psize = chunksize(p); + mchunkptr next = chunk_plus_offset(p, psize); + if (!pinuse(p)) { + size_t prevsize = p->prev_foot; + if ((prevsize & IS_MMAPPED_BIT) != 0) { + prevsize &= ~IS_MMAPPED_BIT; + psize += prevsize + MMAP_FOOT_PAD; + if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) + fm->footprint -= psize; + goto postaction; + } + else { + mchunkptr prev = chunk_minus_offset(p, prevsize); + psize += prevsize; + p = prev; + if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ + if (p != fm->dv) { + unlink_chunk(fm, p, prevsize); + } + else if ((next->head & INUSE_BITS) == INUSE_BITS) { + fm->dvsize = psize; + set_free_with_pinuse(p, psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { + if (!cinuse(next)) { /* consolidate forward */ + if (next == fm->top) { + size_t tsize = fm->topsize += psize; + fm->top = p; + p->head = tsize | PINUSE_BIT; + if (p == fm->dv) { + fm->dv = 0; + fm->dvsize = 0; + } + if (should_trim(fm, tsize)) + sys_trim(fm, 0); + goto postaction; + } + else if (next == fm->dv) { + size_t dsize = fm->dvsize += psize; + fm->dv = p; + set_size_and_pinuse_of_free_chunk(p, dsize); + goto postaction; + } + else { + size_t nsize = chunksize(next); + psize += nsize; + unlink_chunk(fm, next, nsize); + set_size_and_pinuse_of_free_chunk(p, psize); + if (p == fm->dv) { + fm->dvsize = psize; + goto postaction; + } + } + } + else + set_free_with_pinuse(p, psize, next); + + if (is_small(psize)) { + insert_small_chunk(fm, p, psize); + check_free_chunk(fm, p); + } + else { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(fm, tp, psize); + check_free_chunk(fm, p); + if (--fm->release_checks == 0) + release_unused_segments(fm); + } + goto postaction; + } + } + erroraction: + USAGE_ERROR_ACTION(fm, p); + postaction: + POSTACTION(fm); + } + } +} + +void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { + void* mem; + size_t req = 0; + mstate ms = 
(mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + if (n_elements != 0) { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = MAX_SIZE_T; /* force downstream failure on overflow */ + } + mem = internal_malloc(ms, req); + if (mem != 0 && calloc_must_clear(mem2chunk(mem))) + memset(mem, 0, req); + return mem; +} + +void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { + if (oldmem == 0) + return mspace_malloc(msp, bytes); +#ifdef REALLOC_ZERO_BYTES_FREES + if (bytes == 0) { + mspace_free(msp, oldmem); + return 0; + } +#endif /* REALLOC_ZERO_BYTES_FREES */ + else { +#if FOOTERS + mchunkptr p = mem2chunk(oldmem); + mstate ms = get_mstate_for(p); +#else /* FOOTERS */ + mstate ms = (mstate)msp; +#endif /* FOOTERS */ + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_realloc(ms, oldmem, bytes); + } +} + +void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return internal_memalign(ms, alignment, bytes); +} + +void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) { + size_t sz = elem_size; /* serves as 1-element array */ + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, &sz, 3, chunks); +} + +void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + return 0; + } + return ialloc(ms, n_elements, sizes, 0, chunks); +} + +int mspace_trim(mspace msp, size_t pad) { + int result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + if (!PREACTION(ms)) { + result = sys_trim(ms, pad); + POSTACTION(ms); + } + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + +void mspace_malloc_stats(mspace msp) { + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + internal_malloc_stats(ms); + } + else { + USAGE_ERROR_ACTION(ms,ms); + } +} + +size_t mspace_footprint(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->footprint; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + + +size_t mspace_max_footprint(mspace msp) { + size_t result = 0; + mstate ms = (mstate)msp; + if (ok_magic(ms)) { + result = ms->max_footprint; + } + else { + USAGE_ERROR_ACTION(ms,ms); + } + return result; +} + + +#if !NO_MALLINFO +struct mallinfo mspace_mallinfo(mspace msp) { + mstate ms = (mstate)msp; + if (!ok_magic(ms)) { + USAGE_ERROR_ACTION(ms,ms); + } + return internal_mallinfo(ms); +} +#endif /* NO_MALLINFO */ + +size_t mspace_usable_size(void* mem) { + if (mem != 0) { + mchunkptr p = mem2chunk(mem); + if (cinuse(p)) + return chunksize(p) - overhead_for(p); + } + return 0; +} + +int mspace_mallopt(int param_number, int value) { + return change_mparam(param_number, value); +} + +#endif /* MSPACES */ + +/* -------------------- Alternative MORECORE functions ------------------- */ + +/* + Guidelines for creating a custom version of MORECORE: + + * For best performance, MORECORE should allocate in multiples of pagesize. + * MORECORE may allocate more memory than requested. (Or even less, + but this will usually result in a malloc failure.) 
+ * MORECORE must not allocate memory when given argument zero, but + instead return one past the end address of memory from previous + nonzero call. + * For best performance, consecutive calls to MORECORE with positive + arguments should return increasing addresses, indicating that + space has been contiguously extended. + * Even though consecutive calls to MORECORE need not return contiguous + addresses, it must be OK for malloc'ed chunks to span multiple + regions in those cases where they do happen to be contiguous. + * MORECORE need not handle negative arguments -- it may instead + just return MFAIL when given negative arguments. + Negative arguments are always multiples of pagesize. MORECORE + must not misinterpret negative args as large positive unsigned + args. You can suppress all such calls from even occurring by defining + MORECORE_CANNOT_TRIM, + + As an example alternative MORECORE, here is a custom allocator + kindly contributed for pre-OSX macOS. It uses virtually but not + necessarily physically contiguous non-paged memory (locked in, + present and won't get swapped out). You can use it by uncommenting + this section, adding some #includes, and setting up the appropriate + defines above: + + #define MORECORE osMoreCore + + There is also a shutdown routine that should somehow be called for + cleanup upon program exit. + + #define MAX_POOL_ENTRIES 100 + #define MINIMUM_MORECORE_SIZE (64 * 1024U) + static int next_os_pool; + void *our_os_pools[MAX_POOL_ENTRIES]; + + void *osMoreCore(int size) + { + void *ptr = 0; + static void *sbrk_top = 0; + + if (size > 0) + { + if (size < MINIMUM_MORECORE_SIZE) + size = MINIMUM_MORECORE_SIZE; + if (CurrentExecutionLevel() == kTaskLevel) + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); + if (ptr == 0) + { + return (void *) MFAIL; + } + // save ptrs so they can be freed during cleanup + our_os_pools[next_os_pool] = ptr; + next_os_pool++; + ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); + sbrk_top = (char *) ptr + size; + return ptr; + } + else if (size < 0) + { + // we don't currently support shrink behavior + return (void *) MFAIL; + } + else + { + return sbrk_top; + } + } + + // cleanup any allocated memory pools + // called as last thing before shutting down driver + + void osCleanupMem(void) + { + void **ptr; + + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) + if (*ptr) + { + PoolDeallocate(*ptr); + *ptr = 0; + } + } + +*/ + + +/* ----------------------------------------------------------------------- +History: + V2.8.4 (not yet released) + * Add mspace_mmap_large_chunks; thanks to Jean Brouwers + * Fix insufficient sys_alloc padding when using 16byte alignment + * Fix bad error check in mspace_footprint + * Adaptations for ptmalloc, courtesy of Wolfram Gloger. + * Reentrant spin locks, courtesy of Earl Chew and others + * Win32 improvements, courtesy of Niall Douglas and Earl Chew + * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options + * Extension hook in malloc_state + * Various small adjustments to reduce warnings on some compilers + * Various configuration extensions/changes for more platforms. Thanks + to all who contributed these. 
+
+    V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
+      * Add max_footprint functions
+      * Ensure all appropriate literals are size_t
+      * Fix conditional compilation problem for some #define settings
+      * Avoid concatenating segments with the one provided
+        in create_mspace_with_base
+      * Rename some variables to avoid compiler shadowing warnings
+      * Use explicit lock initialization.
+      * Better handling of sbrk interference.
+      * Simplify and fix segment insertion, trimming and mspace_destroy
+      * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
+      * Thanks especially to Dennis Flanagan for help on these.
+
+    V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
+      * Fix memalign brace error.
+
+    V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
+      * Fix improper #endif nesting in C++
+      * Add explicit casts needed for C++
+
+    V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
+      * Use trees for large bins
+      * Support mspaces
+      * Use segments to unify sbrk-based and mmap-based system allocation,
+        removing need for emulation on most platforms without sbrk.
+      * Default safety checks
+      * Optional footer checks. Thanks to William Robertson for the idea.
+      * Internal code refactoring
+      * Incorporate suggestions and platform-specific changes.
+        Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
+        Aaron Bachmann, Emery Berger, and others.
+      * Speed up non-fastbin processing enough to remove fastbins.
+      * Remove useless cfree() to avoid conflicts with other apps.
+      * Remove internal memcpy, memset. Compilers handle builtins better.
+      * Remove some options that no one ever used and rename others.
+
+    V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
+      * Fix malloc_state bitmap array misdeclaration
+
+    V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
+      * Allow tuning of FIRST_SORTED_BIN_SIZE
+      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
+      * Better detection and support for non-contiguousness of MORECORE.
+        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
+      * Bypass most of malloc if no frees. Thanks to Emery Berger.
+      * Fix freeing of old top non-contiguous chunk in sysmalloc.
+      * Raised default trim and map thresholds to 256K.
+      * Fix mmap-related #defines. Thanks to Lubos Lunak.
+      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
+      * Branch-free bin calculation
+      * Default trim and mmap thresholds now 256K.
+
+    V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
+      * Introduce independent_comalloc and independent_calloc.
+        Thanks to Michael Pachos for motivation and help.
+      * Make optional .h file available
+      * Allow > 2GB requests on 32bit systems.
+      * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
+        Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
+        and Anonymous.
+      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
+        helping test this.)
+      * memalign: check alignment arg
+      * realloc: don't try to shift chunks backwards, since this
+        leads to more fragmentation in some programs and doesn't
+        seem to help in any others.
+      * Collect all cases in malloc requiring system memory into sysmalloc
+      * Use mmap as backup to sbrk
+      * Place all internal state in malloc_state
+      * Introduce fastbins (although similar to 2.5.1)
+      * Many minor tunings and cosmetic improvements
+      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
+      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
+        Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
+      * Include errno.h to support default failure action.
+ + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) + * return null for negative arguments + * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com> + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' + (e.g. WIN32 platforms) + * Cleanup header file inclusion for WIN32 platforms + * Cleanup code to avoid Microsoft Visual C++ compiler complaints + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing + memory allocation routines + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to + usage of 'assert' in non-WIN32 code + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to + avoid infinite loop + * Always call 'fREe()' rather than 'free()' + + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) + * Fixed ordering problem with boundary-stamping + + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) + * Added pvalloc, as recommended by H.J. Liu + * Added 64bit pointer support mainly from Wolfram Gloger + * Added anonymously donated WIN32 sbrk emulation + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen + * malloc_extend_top: fix mask error that caused wastage after + foreign sbrks + * Add linux mremap support code from HJ Liu + + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) + * Integrated most documentation with the code. + * Add support for mmap, with help from + Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Use last_remainder in more cases. + * Pack bins using idea from colin@nyx10.cs.du.edu + * Use ordered bins instead of best-fit threshold + * Eliminate block-local decls to simplify tracing and debugging. + * Support another case of realloc via move into top + * Fix error occurring when initial sbrk_base not word-aligned. + * Rely on page size for units instead of SBRK_UNIT to + avoid surprises about sbrk alignment conventions. + * Add mallinfo, mallopt. Thanks to Raymond Nijssen + (raymond@es.ele.tue.nl) for the suggestion. + * Add `pad' argument to malloc_trim and top_pad mallopt parameter. + * More precautions for cases where other routines call sbrk, + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). + * Added macros etc., allowing use in linux libc from + H.J. Lu (hjl@gnu.ai.mit.edu) + * Inverted this history list + + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) + * Re-tuned and fixed to behave more nicely with V2.6.0 changes. + * Removed all preallocation code since under current scheme + the work required to undo bad preallocations exceeds + the work saved in good cases for most test programs. + * No longer use return list or unconsolidated bins since + no scheme using them consistently outperforms those that don't + given above changes. + * Use best fit for very large chunks to prevent some worst-cases. + * Added some support for debugging + + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) + * Removed footers when chunks are in use. Thanks to + Paul Wilson (wilson@cs.texas.edu) for the suggestion. + + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) + * Added malloc_trim, with help from Wolfram Gloger + (wmglo@Dent.MED.Uni-Muenchen.DE). 
+ + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) + + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) + * realloc: try to expand in both directions + * malloc: swap order of clean-bin strategy; + * realloc: only conditionally expand backwards + * Try not to scavenge used bins + * Use bin counts as a guide to preallocation + * Occasionally bin return list chunks in first scan + * Add a few optimizations from colin@nyx10.cs.du.edu + + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) + * faster bin computation & slightly different binning + * merged all consolidations to one part of malloc proper + (eliminating old malloc_find_space & malloc_clean_bin) + * Scan 2 returns chunks (not just 1) + * Propagate failure in realloc if malloc returns 0 + * Add stuff to allow compilation on non-ANSI compilers + from kpv@research.att.com + + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) + * removed potential for odd address access in prev_chunk + * removed dependency on getpagesize.h + * misc cosmetics and a bit more internal documentation + * anticosmetics: mangled names in macros to evade debugger strangeness + * tested on sparc, hp-700, dec-mips, rs6000 + with gcc & native cc (hp, dec only) allowing + Detlefs & Zorn comparison study (in SIGPLAN Notices.) + + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) + * Based loosely on libg++-1.2X malloc. (It retains some of the overall + structure of old version, but most details differ.) + +*/ diff --git a/compat/nedmalloc/nedmalloc.c b/compat/nedmalloc/nedmalloc.c new file mode 100644 index 0000000000..609ebba125 --- /dev/null +++ b/compat/nedmalloc/nedmalloc.c @@ -0,0 +1,969 @@ +/* Alternative malloc implementation for multiple threads without +lock contention based on dlmalloc. (C) 2005-2006 Niall Douglas + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+*/ + +#ifdef _MSC_VER +/* Enable full aliasing on MSVC */ +/*#pragma optimize("a", on)*/ +#endif + +/*#define FULLSANITYCHECKS*/ + +#include "nedmalloc.h" +#if defined(WIN32) + #include <malloc.h> +#endif +#define MSPACES 1 +#define ONLY_MSPACES 1 +#ifndef USE_LOCKS + #define USE_LOCKS 1 +#endif +#define FOOTERS 1 /* Need to enable footers so frees lock the right mspace */ +#undef DEBUG /* dlmalloc wants DEBUG either 0 or 1 */ +#ifdef _DEBUG + #define DEBUG 1 +#else + #define DEBUG 0 +#endif +#ifdef NDEBUG /* Disable assert checking on release builds */ + #undef DEBUG +#endif +/* The default of 64Kb means we spend too much time kernel-side */ +#ifndef DEFAULT_GRANULARITY +#define DEFAULT_GRANULARITY (1*1024*1024) +#endif +/*#define USE_SPIN_LOCKS 0*/ + + +/*#define FORCEINLINE*/ +#include "malloc.c.h" +#ifdef NDEBUG /* Disable assert checking on release builds */ + #undef DEBUG +#endif + +/* The maximum concurrent threads in a pool possible */ +#ifndef MAXTHREADSINPOOL +#define MAXTHREADSINPOOL 16 +#endif +/* The maximum number of threadcaches which can be allocated */ +#ifndef THREADCACHEMAXCACHES +#define THREADCACHEMAXCACHES 256 +#endif +/* The maximum size to be allocated from the thread cache */ +#ifndef THREADCACHEMAX +#define THREADCACHEMAX 8192 +#endif +#if 0 +/* The number of cache entries for finer grained bins. This is (topbitpos(THREADCACHEMAX)-4)*2 */ +#define THREADCACHEMAXBINS ((13-4)*2) +#else +/* The number of cache entries. This is (topbitpos(THREADCACHEMAX)-4) */ +#define THREADCACHEMAXBINS (13-4) +#endif +/* Point at which the free space in a thread cache is garbage collected */ +#ifndef THREADCACHEMAXFREESPACE +#define THREADCACHEMAXFREESPACE (512*1024) +#endif + + +#ifdef WIN32 + #define TLSVAR DWORD + #define TLSALLOC(k) (*(k)=TlsAlloc(), TLS_OUT_OF_INDEXES==*(k)) + #define TLSFREE(k) (!TlsFree(k)) + #define TLSGET(k) TlsGetValue(k) + #define TLSSET(k, a) (!TlsSetValue(k, a)) + #ifdef DEBUG +static LPVOID ChkedTlsGetValue(DWORD idx) +{ + LPVOID ret=TlsGetValue(idx); + assert(S_OK==GetLastError()); + return ret; +} + #undef TLSGET + #define TLSGET(k) ChkedTlsGetValue(k) + #endif +#else + #define TLSVAR pthread_key_t + #define TLSALLOC(k) pthread_key_create(k, 0) + #define TLSFREE(k) pthread_key_delete(k) + #define TLSGET(k) pthread_getspecific(k) + #define TLSSET(k, a) pthread_setspecific(k, a) +#endif + +#if 0 +/* Only enable if testing with valgrind. Causes misoperation */ +#define mspace_malloc(p, s) malloc(s) +#define mspace_realloc(p, m, s) realloc(m, s) +#define mspace_calloc(p, n, s) calloc(n, s) +#define mspace_free(p, m) free(m) +#endif + + +#if defined(__cplusplus) +#if !defined(NO_NED_NAMESPACE) +namespace nedalloc { +#else +extern "C" { +#endif +#endif + +size_t nedblksize(void *mem) THROWSPEC +{ +#if 0 + /* Only enable if testing with valgrind. 
Causes misoperation */ + return THREADCACHEMAX; +#else + if(mem) + { + mchunkptr p=mem2chunk(mem); + assert(cinuse(p)); /* If this fails, someone tried to free a block twice */ + if(cinuse(p)) + return chunksize(p)-overhead_for(p); + } + return 0; +#endif +} + +void nedsetvalue(void *v) THROWSPEC { nedpsetvalue(0, v); } +void * nedmalloc(size_t size) THROWSPEC { return nedpmalloc(0, size); } +void * nedcalloc(size_t no, size_t size) THROWSPEC { return nedpcalloc(0, no, size); } +void * nedrealloc(void *mem, size_t size) THROWSPEC { return nedprealloc(0, mem, size); } +void nedfree(void *mem) THROWSPEC { nedpfree(0, mem); } +void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC { return nedpmemalign(0, alignment, bytes); } +#if !NO_MALLINFO +struct mallinfo nedmallinfo(void) THROWSPEC { return nedpmallinfo(0); } +#endif +int nedmallopt(int parno, int value) THROWSPEC { return nedpmallopt(0, parno, value); } +int nedmalloc_trim(size_t pad) THROWSPEC { return nedpmalloc_trim(0, pad); } +void nedmalloc_stats(void) THROWSPEC { nedpmalloc_stats(0); } +size_t nedmalloc_footprint(void) THROWSPEC { return nedpmalloc_footprint(0); } +void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC { return nedpindependent_calloc(0, elemsno, elemsize, chunks); } +void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC { return nedpindependent_comalloc(0, elems, sizes, chunks); } + +struct threadcacheblk_t; +typedef struct threadcacheblk_t threadcacheblk; +struct threadcacheblk_t +{ /* Keep less than 16 bytes on 32 bit systems and 32 bytes on 64 bit systems */ +#ifdef FULLSANITYCHECKS + unsigned int magic; +#endif + unsigned int lastUsed, size; + threadcacheblk *next, *prev; +}; +typedef struct threadcache_t +{ +#ifdef FULLSANITYCHECKS + unsigned int magic1; +#endif + int mymspace; /* Last mspace entry this thread used */ + long threadid; + unsigned int mallocs, frees, successes; + size_t freeInCache; /* How much free space is stored in this cache */ + threadcacheblk *bins[(THREADCACHEMAXBINS+1)*2]; +#ifdef FULLSANITYCHECKS + unsigned int magic2; +#endif +} threadcache; +struct nedpool_t +{ + MLOCK_T mutex; + void *uservalue; + int threads; /* Max entries in m to use */ + threadcache *caches[THREADCACHEMAXCACHES]; + TLSVAR mycache; /* Thread cache for this thread. 
0 for unset, negative for use mspace-1 directly, otherwise is cache-1 */ + mstate m[MAXTHREADSINPOOL+1]; /* mspace entries for this pool */ +}; +static nedpool syspool; + +static FORCEINLINE unsigned int size2binidx(size_t _size) THROWSPEC +{ /* 8=1000 16=10000 20=10100 24=11000 32=100000 48=110000 4096=1000000000000 */ + unsigned int topbit, size=(unsigned int)(_size>>4); + /* 16=1 20=1 24=1 32=10 48=11 64=100 96=110 128=1000 4096=100000000 */ + +#if defined(__GNUC__) + topbit = sizeof(size)*__CHAR_BIT__ - 1 - __builtin_clz(size); +#elif defined(_MSC_VER) && _MSC_VER>=1300 + { + unsigned long bsrTopBit; + + _BitScanReverse(&bsrTopBit, size); + + topbit = bsrTopBit; + } +#else +#if 0 + union { + unsigned asInt[2]; + double asDouble; + }; + int n; + + asDouble = (double)size + 0.5; + topbit = (asInt[!FOX_BIGENDIAN] >> 20) - 1023; +#else + { + unsigned int x=size; + x = x | (x >> 1); + x = x | (x >> 2); + x = x | (x >> 4); + x = x | (x >> 8); + x = x | (x >>16); + x = ~x; + x = x - ((x >> 1) & 0x55555555); + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + x = x + (x << 8); + x = x + (x << 16); + topbit=31 - (x >> 24); + } +#endif +#endif + return topbit; +} + + +#ifdef FULLSANITYCHECKS +static void tcsanitycheck(threadcacheblk **ptr) THROWSPEC +{ + assert((ptr[0] && ptr[1]) || (!ptr[0] && !ptr[1])); + if(ptr[0] && ptr[1]) + { + assert(nedblksize(ptr[0])>=sizeof(threadcacheblk)); + assert(nedblksize(ptr[1])>=sizeof(threadcacheblk)); + assert(*(unsigned int *) "NEDN"==ptr[0]->magic); + assert(*(unsigned int *) "NEDN"==ptr[1]->magic); + assert(!ptr[0]->prev); + assert(!ptr[1]->next); + if(ptr[0]==ptr[1]) + { + assert(!ptr[0]->next); + assert(!ptr[1]->prev); + } + } +} +static void tcfullsanitycheck(threadcache *tc) THROWSPEC +{ + threadcacheblk **tcbptr=tc->bins; + int n; + for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) + { + threadcacheblk *b, *ob=0; + tcsanitycheck(tcbptr); + for(b=tcbptr[0]; b; ob=b, b=b->next) + { + assert(*(unsigned int *) "NEDN"==b->magic); + assert(!ob || ob->next==b); + assert(!ob || b->prev==ob); + } + } +} +#endif + +static NOINLINE void RemoveCacheEntries(nedpool *p, threadcache *tc, unsigned int age) THROWSPEC +{ +#ifdef FULLSANITYCHECKS + tcfullsanitycheck(tc); +#endif + if(tc->freeInCache) + { + threadcacheblk **tcbptr=tc->bins; + int n; + for(n=0; n<=THREADCACHEMAXBINS; n++, tcbptr+=2) + { + threadcacheblk **tcb=tcbptr+1; /* come from oldest end of list */ + /*tcsanitycheck(tcbptr);*/ + for(; *tcb && tc->frees-(*tcb)->lastUsed>=age; ) + { + threadcacheblk *f=*tcb; + size_t blksize=f->size; /*nedblksize(f);*/ + assert(blksize<=nedblksize(f)); + assert(blksize); +#ifdef FULLSANITYCHECKS + assert(*(unsigned int *) "NEDN"==(*tcb)->magic); +#endif + *tcb=(*tcb)->prev; + if(*tcb) + (*tcb)->next=0; + else + *tcbptr=0; + tc->freeInCache-=blksize; + assert((long) tc->freeInCache>=0); + mspace_free(0, f); + /*tcsanitycheck(tcbptr);*/ + } + } + } +#ifdef FULLSANITYCHECKS + tcfullsanitycheck(tc); +#endif +} +static void DestroyCaches(nedpool *p) THROWSPEC +{ + if(p->caches) + { + threadcache *tc; + int n; + for(n=0; n<THREADCACHEMAXCACHES; n++) + { + if((tc=p->caches[n])) + { + tc->frees++; + RemoveCacheEntries(p, tc, 0); + assert(!tc->freeInCache); + tc->mymspace=-1; + tc->threadid=0; + mspace_free(0, tc); + p->caches[n]=0; + } + } + } +} + +static NOINLINE threadcache *AllocCache(nedpool *p) THROWSPEC +{ + threadcache *tc=0; + int n, end; + ACQUIRE_LOCK(&p->mutex); + for(n=0; n<THREADCACHEMAXCACHES && p->caches[n]; n++); + 
if(THREADCACHEMAXCACHES==n) + { /* List exhausted, so disable for this thread */ + RELEASE_LOCK(&p->mutex); + return 0; + } + tc=p->caches[n]=(threadcache *) mspace_calloc(p->m[0], 1, sizeof(threadcache)); + if(!tc) + { + RELEASE_LOCK(&p->mutex); + return 0; + } +#ifdef FULLSANITYCHECKS + tc->magic1=*(unsigned int *)"NEDMALC1"; + tc->magic2=*(unsigned int *)"NEDMALC2"; +#endif + tc->threadid=(long)(size_t)CURRENT_THREAD; + for(end=0; p->m[end]; end++); + tc->mymspace=tc->threadid % end; + RELEASE_LOCK(&p->mutex); + if(TLSSET(p->mycache, (void *)(size_t)(n+1))) abort(); + return tc; +} + +static void *threadcache_malloc(nedpool *p, threadcache *tc, size_t *size) THROWSPEC +{ + void *ret=0; + unsigned int bestsize; + unsigned int idx=size2binidx(*size); + size_t blksize=0; + threadcacheblk *blk, **binsptr; +#ifdef FULLSANITYCHECKS + tcfullsanitycheck(tc); +#endif + /* Calculate best fit bin size */ + bestsize=1<<(idx+4); +#if 0 + /* Finer grained bin fit */ + idx<<=1; + if(*size>bestsize) + { + idx++; + bestsize+=bestsize>>1; + } + if(*size>bestsize) + { + idx++; + bestsize=1<<(4+(idx>>1)); + } +#else + if(*size>bestsize) + { + idx++; + bestsize<<=1; + } +#endif + assert(bestsize>=*size); + if(*size<bestsize) *size=bestsize; + assert(*size<=THREADCACHEMAX); + assert(idx<=THREADCACHEMAXBINS); + binsptr=&tc->bins[idx*2]; + /* Try to match close, but move up a bin if necessary */ + blk=*binsptr; + if(!blk || blk->size<*size) + { /* Bump it up a bin */ + if(idx<THREADCACHEMAXBINS) + { + idx++; + binsptr+=2; + blk=*binsptr; + } + } + if(blk) + { + blksize=blk->size; /*nedblksize(blk);*/ + assert(nedblksize(blk)>=blksize); + assert(blksize>=*size); + if(blk->next) + blk->next->prev=0; + *binsptr=blk->next; + if(!*binsptr) + binsptr[1]=0; +#ifdef FULLSANITYCHECKS + blk->magic=0; +#endif + assert(binsptr[0]!=blk && binsptr[1]!=blk); + assert(nedblksize(blk)>=sizeof(threadcacheblk) && nedblksize(blk)<=THREADCACHEMAX+CHUNK_OVERHEAD); + /*printf("malloc: %p, %p, %p, %lu\n", p, tc, blk, (long) size);*/ + ret=(void *) blk; + } + ++tc->mallocs; + if(ret) + { + assert(blksize>=*size); + ++tc->successes; + tc->freeInCache-=blksize; + assert((long) tc->freeInCache>=0); + } +#if defined(DEBUG) && 0 + if(!(tc->mallocs & 0xfff)) + { + printf("*** threadcache=%u, mallocs=%u (%f), free=%u (%f), freeInCache=%u\n", (unsigned int) tc->threadid, tc->mallocs, + (float) tc->successes/tc->mallocs, tc->frees, (float) tc->successes/tc->frees, (unsigned int) tc->freeInCache); + } +#endif +#ifdef FULLSANITYCHECKS + tcfullsanitycheck(tc); +#endif + return ret; +} +static NOINLINE void ReleaseFreeInCache(nedpool *p, threadcache *tc, int mymspace) THROWSPEC +{ + unsigned int age=THREADCACHEMAXFREESPACE/8192; + /*ACQUIRE_LOCK(&p->m[mymspace]->mutex);*/ + while(age && tc->freeInCache>=THREADCACHEMAXFREESPACE) + { + RemoveCacheEntries(p, tc, age); + /*printf("*** Removing cache entries older than %u (%u)\n", age, (unsigned int) tc->freeInCache);*/ + age>>=1; + } + /*RELEASE_LOCK(&p->m[mymspace]->mutex);*/ +} +static void threadcache_free(nedpool *p, threadcache *tc, int mymspace, void *mem, size_t size) THROWSPEC +{ + unsigned int bestsize; + unsigned int idx=size2binidx(size); + threadcacheblk **binsptr, *tck=(threadcacheblk *) mem; + assert(size>=sizeof(threadcacheblk) && size<=THREADCACHEMAX+CHUNK_OVERHEAD); +#ifdef DEBUG + { /* Make sure this is a valid memory block */ + mchunkptr p = mem2chunk(mem); + mstate fm = get_mstate_for(p); + if (!ok_magic(fm)) { + USAGE_ERROR_ACTION(fm, p); + return; + } + } +#endif +#ifdef 
FULLSANITYCHECKS + tcfullsanitycheck(tc); +#endif + /* Calculate best fit bin size */ + bestsize=1<<(idx+4); +#if 0 + /* Finer grained bin fit */ + idx<<=1; + if(size>bestsize) + { + unsigned int biggerbestsize=bestsize+bestsize<<1; + if(size>=biggerbestsize) + { + idx++; + bestsize=biggerbestsize; + } + } +#endif + if(bestsize!=size) /* dlmalloc can round up, so we round down to preserve indexing */ + size=bestsize; + binsptr=&tc->bins[idx*2]; + assert(idx<=THREADCACHEMAXBINS); + if(tck==*binsptr) + { + fprintf(stderr, "Attempt to free already freed memory block %p - aborting!\n", tck); + abort(); + } +#ifdef FULLSANITYCHECKS + tck->magic=*(unsigned int *) "NEDN"; +#endif + tck->lastUsed=++tc->frees; + tck->size=(unsigned int) size; + tck->next=*binsptr; + tck->prev=0; + if(tck->next) + tck->next->prev=tck; + else + binsptr[1]=tck; + assert(!*binsptr || (*binsptr)->size==tck->size); + *binsptr=tck; + assert(tck==tc->bins[idx*2]); + assert(tc->bins[idx*2+1]==tck || binsptr[0]->next->prev==tck); + /*printf("free: %p, %p, %p, %lu\n", p, tc, mem, (long) size);*/ + tc->freeInCache+=size; +#ifdef FULLSANITYCHECKS + tcfullsanitycheck(tc); +#endif +#if 1 + if(tc->freeInCache>=THREADCACHEMAXFREESPACE) + ReleaseFreeInCache(p, tc, mymspace); +#endif +} + + + + +static NOINLINE int InitPool(nedpool *p, size_t capacity, int threads) THROWSPEC +{ /* threads is -1 for system pool */ + ensure_initialization(); + ACQUIRE_MALLOC_GLOBAL_LOCK(); + if(p->threads) goto done; + if(INITIAL_LOCK(&p->mutex)) goto err; + if(TLSALLOC(&p->mycache)) goto err; + if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err; + p->m[0]->extp=p; + p->threads=(threads<1 || threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : threads; +done: + RELEASE_MALLOC_GLOBAL_LOCK(); + return 1; +err: + if(threads<0) + abort(); /* If you can't allocate for system pool, we're screwed */ + DestroyCaches(p); + if(p->m[0]) + { + destroy_mspace(p->m[0]); + p->m[0]=0; + } + if(p->mycache) + { + if(TLSFREE(p->mycache)) abort(); + p->mycache=0; + } + RELEASE_MALLOC_GLOBAL_LOCK(); + return 0; +} +static NOINLINE mstate FindMSpace(nedpool *p, threadcache *tc, int *lastUsed, size_t size) THROWSPEC +{ /* Gets called when thread's last used mspace is in use. 
The strategy + is to run through the list of all available mspaces looking for an + unlocked one and if we fail, we create a new one so long as we don't + exceed p->threads */ + int n, end; + for(n=end=*lastUsed+1; p->m[n]; end=++n) + { + if(TRY_LOCK(&p->m[n]->mutex)) goto found; + } + for(n=0; n<*lastUsed && p->m[n]; n++) + { + if(TRY_LOCK(&p->m[n]->mutex)) goto found; + } + if(end<p->threads) + { + mstate temp; + if(!(temp=(mstate) create_mspace(size, 1))) + goto badexit; + /* Now we're ready to modify the lists, we lock */ + ACQUIRE_LOCK(&p->mutex); + while(p->m[end] && end<p->threads) + end++; + if(end>=p->threads) + { /* Drat, must destroy it now */ + RELEASE_LOCK(&p->mutex); + destroy_mspace((mspace) temp); + goto badexit; + } + /* We really want to make sure this goes into memory now but we + have to be careful of breaking aliasing rules, so write it twice */ + { + volatile struct malloc_state **_m=(volatile struct malloc_state **) &p->m[end]; + *_m=(p->m[end]=temp); + } + ACQUIRE_LOCK(&p->m[end]->mutex); + /*printf("Created mspace idx %d\n", end);*/ + RELEASE_LOCK(&p->mutex); + n=end; + goto found; + } + /* Let it lock on the last one it used */ +badexit: + ACQUIRE_LOCK(&p->m[*lastUsed]->mutex); + return p->m[*lastUsed]; +found: + *lastUsed=n; + if(tc) + tc->mymspace=n; + else + { + if(TLSSET(p->mycache, (void *)(size_t)(-(n+1)))) abort(); + } + return p->m[n]; +} + +nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC +{ + nedpool *ret; + if(!(ret=(nedpool *) nedpcalloc(0, 1, sizeof(nedpool)))) return 0; + if(!InitPool(ret, capacity, threads)) + { + nedpfree(0, ret); + return 0; + } + return ret; +} +void neddestroypool(nedpool *p) THROWSPEC +{ + int n; + ACQUIRE_LOCK(&p->mutex); + DestroyCaches(p); + for(n=0; p->m[n]; n++) + { + destroy_mspace(p->m[n]); + p->m[n]=0; + } + RELEASE_LOCK(&p->mutex); + if(TLSFREE(p->mycache)) abort(); + nedpfree(0, p); +} + +void nedpsetvalue(nedpool *p, void *v) THROWSPEC +{ + if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } + p->uservalue=v; +} +void *nedgetvalue(nedpool **p, void *mem) THROWSPEC +{ + nedpool *np=0; + mchunkptr mcp=mem2chunk(mem); + mstate fm; + if(!(is_aligned(chunk2mem(mcp))) && mcp->head != FENCEPOST_HEAD) return 0; + if(!cinuse(mcp)) return 0; + if(!next_pinuse(mcp)) return 0; + if(!is_mmapped(mcp) && !pinuse(mcp)) + { + if(next_chunk(prev_chunk(mcp))!=mcp) return 0; + } + fm=get_mstate_for(mcp); + if(!ok_magic(fm)) return 0; + if(!ok_address(fm, mcp)) return 0; + if(!fm->extp) return 0; + np=(nedpool *) fm->extp; + if(p) *p=np; + return np->uservalue; +} + +void neddisablethreadcache(nedpool *p) THROWSPEC +{ + int mycache; + if(!p) + { + p=&syspool; + if(!syspool.threads) InitPool(&syspool, 0, -1); + } + mycache=(int)(size_t) TLSGET(p->mycache); + if(!mycache) + { /* Set to mspace 0 */ + if(TLSSET(p->mycache, (void *)-1)) abort(); + } + else if(mycache>0) + { /* Set to last used mspace */ + threadcache *tc=p->caches[mycache-1]; +#if defined(DEBUG) + printf("Threadcache utilisation: %lf%% in cache with %lf%% lost to other threads\n", + 100.0*tc->successes/tc->mallocs, 100.0*((double) tc->mallocs-tc->frees)/tc->mallocs); +#endif + if(TLSSET(p->mycache, (void *)(size_t)(-tc->mymspace))) abort(); + tc->frees++; + RemoveCacheEntries(p, tc, 0); + assert(!tc->freeInCache); + tc->mymspace=-1; + tc->threadid=0; + mspace_free(0, p->caches[mycache-1]); + p->caches[mycache-1]=0; + } +} + +#define GETMSPACE(m,p,tc,ms,s,action) \ + do \ + { \ + mstate m = GetMSpace((p),(tc),(ms),(s)); \ + action; \ + 
RELEASE_LOCK(&m->mutex); \
+  } while (0)
+
+static FORCEINLINE mstate GetMSpace(nedpool *p, threadcache *tc, int mymspace, size_t size) THROWSPEC
+{	/* Returns a locked and ready for use mspace */
+	mstate m=p->m[mymspace];
+	assert(m);
+	if(!TRY_LOCK(&p->m[mymspace]->mutex)) m=FindMSpace(p, tc, &mymspace, size);
+	/*assert(IS_LOCKED(&p->m[mymspace]->mutex));*/
+	return m;
+}
+static FORCEINLINE void GetThreadCache(nedpool **p, threadcache **tc, int *mymspace, size_t *size) THROWSPEC
+{
+	int mycache;
+	if(size && *size<sizeof(threadcacheblk)) *size=sizeof(threadcacheblk);
+	if(!*p)
+	{
+		*p=&syspool;
+		if(!syspool.threads) InitPool(&syspool, 0, -1);
+	}
+	mycache=(int)(size_t) TLSGET((*p)->mycache);
+	if(mycache>0)
+	{
+		*tc=(*p)->caches[mycache-1];
+		*mymspace=(*tc)->mymspace;
+	}
+	else if(!mycache)
+	{
+		*tc=AllocCache(*p);
+		if(!*tc)
+		{	/* Disable */
+			if(TLSSET((*p)->mycache, (void *)-1)) abort();
+			*mymspace=0;
+		}
+		else
+			*mymspace=(*tc)->mymspace;
+	}
+	else
+	{
+		*tc=0;
+		*mymspace=-mycache-1;
+	}
+	assert(*mymspace>=0);
+	/* Guard the dereference: *tc is zero when the thread cache is disabled */
+	assert(!*tc || (long)(size_t)CURRENT_THREAD==(*tc)->threadid);
+#ifdef FULLSANITYCHECKS
+	if(*tc)
+	{
+		if(*(unsigned int *)"NEDMALC1"!=(*tc)->magic1 || *(unsigned int *)"NEDMALC2"!=(*tc)->magic2)
+		{
+			abort();
+		}
+	}
+#endif
+}
+
+void * nedpmalloc(nedpool *p, size_t size) THROWSPEC
+{
+	void *ret=0;
+	threadcache *tc;
+	int mymspace;
+	GetThreadCache(&p, &tc, &mymspace, &size);
+#if THREADCACHEMAX
+	if(tc && size<=THREADCACHEMAX)
+	{	/* Use the thread cache */
+		ret=threadcache_malloc(p, tc, &size);
+	}
+#endif
+	if(!ret)
+	{	/* Use this thread's mspace */
+		GETMSPACE(m, p, tc, mymspace, size,
+			  ret=mspace_malloc(m, size));
+	}
+	return ret;
+}
+void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC
+{
+	size_t rsize=size*no;
+	void *ret=0;
+	threadcache *tc;
+	int mymspace;
+	GetThreadCache(&p, &tc, &mymspace, &rsize);
+#if THREADCACHEMAX
+	if(tc && rsize<=THREADCACHEMAX)
+	{	/* Use the thread cache */
+		if((ret=threadcache_malloc(p, tc, &rsize)))
+			memset(ret, 0, rsize);
+	}
+#endif
+	if(!ret)
+	{	/* Use this thread's mspace */
+		GETMSPACE(m, p, tc, mymspace, rsize,
+			  ret=mspace_calloc(m, 1, rsize));
+	}
+	return ret;
+}
+void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC
+{
+	void *ret=0;
+	threadcache *tc;
+	int mymspace;
+	if(!mem) return nedpmalloc(p, size);
+	GetThreadCache(&p, &tc, &mymspace, &size);
+#if THREADCACHEMAX
+	if(tc && size && size<=THREADCACHEMAX)
+	{	/* Use the thread cache */
+		size_t memsize=nedblksize(mem);
+		assert(memsize);
+		if((ret=threadcache_malloc(p, tc, &size)))
+		{
+			memcpy(ret, mem, memsize<size ?
memsize : size); + if(memsize<=THREADCACHEMAX) + threadcache_free(p, tc, mymspace, mem, memsize); + else + mspace_free(0, mem); + } + } +#endif + if(!ret) + { /* Reallocs always happen in the mspace they happened in, so skip + locking the preferred mspace for this thread */ + ret=mspace_realloc(0, mem, size); + } + return ret; +} +void nedpfree(nedpool *p, void *mem) THROWSPEC +{ /* Frees always happen in the mspace they happened in, so skip + locking the preferred mspace for this thread */ + threadcache *tc; + int mymspace; + size_t memsize; + assert(mem); + GetThreadCache(&p, &tc, &mymspace, 0); +#if THREADCACHEMAX + memsize=nedblksize(mem); + assert(memsize); + if(mem && tc && memsize<=(THREADCACHEMAX+CHUNK_OVERHEAD)) + threadcache_free(p, tc, mymspace, mem, memsize); + else +#endif + mspace_free(0, mem); +} +void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC +{ + void *ret; + threadcache *tc; + int mymspace; + GetThreadCache(&p, &tc, &mymspace, &bytes); + { /* Use this thread's mspace */ + GETMSPACE(m, p, tc, mymspace, bytes, + ret=mspace_memalign(m, alignment, bytes)); + } + return ret; +} +#if !NO_MALLINFO +struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC +{ + int n; + struct mallinfo ret={0}; + if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } + for(n=0; p->m[n]; n++) + { + struct mallinfo t=mspace_mallinfo(p->m[n]); + ret.arena+=t.arena; + ret.ordblks+=t.ordblks; + ret.hblkhd+=t.hblkhd; + ret.usmblks+=t.usmblks; + ret.uordblks+=t.uordblks; + ret.fordblks+=t.fordblks; + ret.keepcost+=t.keepcost; + } + return ret; +} +#endif +int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC +{ + return mspace_mallopt(parno, value); +} +int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC +{ + int n, ret=0; + if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } + for(n=0; p->m[n]; n++) + { + ret+=mspace_trim(p->m[n], pad); + } + return ret; +} +void nedpmalloc_stats(nedpool *p) THROWSPEC +{ + int n; + if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } + for(n=0; p->m[n]; n++) + { + mspace_malloc_stats(p->m[n]); + } +} +size_t nedpmalloc_footprint(nedpool *p) THROWSPEC +{ + size_t ret=0; + int n; + if(!p) { p=&syspool; if(!syspool.threads) InitPool(&syspool, 0, -1); } + for(n=0; p->m[n]; n++) + { + ret+=mspace_footprint(p->m[n]); + } + return ret; +} +void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC +{ + void **ret; + threadcache *tc; + int mymspace; + GetThreadCache(&p, &tc, &mymspace, &elemsize); + GETMSPACE(m, p, tc, mymspace, elemsno*elemsize, + ret=mspace_independent_calloc(m, elemsno, elemsize, chunks)); + return ret; +} +void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC +{ + void **ret; + threadcache *tc; + int mymspace; + size_t i, *adjustedsizes=(size_t *) alloca(elems*sizeof(size_t)); + if(!adjustedsizes) return 0; + for(i=0; i<elems; i++) + adjustedsizes[i]=sizes[i]<sizeof(threadcacheblk) ? sizeof(threadcacheblk) : sizes[i]; + GetThreadCache(&p, &tc, &mymspace, 0); + GETMSPACE(m, p, tc, mymspace, 0, + ret=mspace_independent_comalloc(m, elems, adjustedsizes, chunks)); + return ret; +} + +#ifdef OVERRIDE_STRDUP +/* + * This implementation is purely there to override the libc version, to + * avoid a crash due to allocation and free on different 'heaps'. 
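+ *
+ * (Illustrative note: with REPLACE_SYSTEM_ALLOCATOR defined, an
+ * application's free() resolves to nedfree(), so a buffer returned by the
+ * libc strdup() -- allocated on the libc heap -- would be released on a
+ * heap that never allocated it. Overriding strdup() keeps both halves of
+ * the allocate/free pair on the same heap.)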
+ */
+char *strdup(const char *s1)
+{
+	char *s2 = 0;
+	if (s1) {
+		s2 = malloc(strlen(s1) + 1);
+		if (s2)	/* match the libc contract: return 0 rather than crash on OOM */
+			strcpy(s2, s1);
+	}
+	return s2;
+}
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
diff --git a/compat/nedmalloc/nedmalloc.h b/compat/nedmalloc/nedmalloc.h
new file mode 100644
index 0000000000..f960e66063
--- /dev/null
+++ b/compat/nedmalloc/nedmalloc.h
@@ -0,0 +1,180 @@
+/* nedalloc, an alternative malloc implementation for multiple threads without
+lock contention based on dlmalloc v2.8.3. (C) 2005 Niall Douglas
+
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef NEDMALLOC_H
+#define NEDMALLOC_H
+
+
+/* See malloc.c.h for what each function does.
+
+REPLACE_SYSTEM_ALLOCATOR causes nedalloc's functions to be called malloc,
+free etc. instead of nedmalloc, nedfree etc. You may or may not want this.
+
+NO_NED_NAMESPACE prevents the functions from being defined in the nedalloc
+namespace when in C++ (uses the global namespace instead).
+
+EXTSPEC can be defined to be __declspec(dllexport) or
+__attribute__ ((visibility("default"))) or whatever you like. It defaults
+to extern.
+
+USE_LOCKS can be 2 if you want to define your own MLOCK_T, INITIAL_LOCK,
+ACQUIRE_LOCK, RELEASE_LOCK, TRY_LOCK, IS_LOCKED and NULL_LOCK_INITIALIZER.
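+
+As an illustrative sketch (hypothetical client code, not a prescription):
+compiling both nedmalloc.c and its callers with
+
+    #define REPLACE_SYSTEM_ALLOCATOR
+    #include "nedmalloc.h"
+
+turns every nedmalloc()/nedfree() reference into plain malloc()/free(),
+while nedmalloc.c built the same way exports those standard names itself,
+replacing the system allocator at link time.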
+ +*/ + +#include <stddef.h> /* for size_t */ + +#ifndef EXTSPEC + #define EXTSPEC extern +#endif + +#if defined(_MSC_VER) && _MSC_VER>=1400 + #define MALLOCATTR __declspec(restrict) +#endif +#ifdef __GNUC__ + #define MALLOCATTR __attribute__ ((malloc)) +#endif +#ifndef MALLOCATTR + #define MALLOCATTR +#endif + +#ifdef REPLACE_SYSTEM_ALLOCATOR + #define nedmalloc malloc + #define nedcalloc calloc + #define nedrealloc realloc + #define nedfree free + #define nedmemalign memalign + #define nedmallinfo mallinfo + #define nedmallopt mallopt + #define nedmalloc_trim malloc_trim + #define nedmalloc_stats malloc_stats + #define nedmalloc_footprint malloc_footprint + #define nedindependent_calloc independent_calloc + #define nedindependent_comalloc independent_comalloc + #ifdef _MSC_VER + #define nedblksize _msize + #endif +#endif + +#ifndef NO_MALLINFO +#define NO_MALLINFO 0 +#endif + +#if !NO_MALLINFO +struct mallinfo; +#endif + +#if defined(__cplusplus) + #if !defined(NO_NED_NAMESPACE) +namespace nedalloc { + #else +extern "C" { + #endif + #define THROWSPEC throw() +#else + #define THROWSPEC +#endif + +/* These are the global functions */ + +/* Gets the usable size of an allocated block. Note this will always be bigger than what was +asked for due to rounding etc. +*/ +EXTSPEC size_t nedblksize(void *mem) THROWSPEC; + +EXTSPEC void nedsetvalue(void *v) THROWSPEC; + +EXTSPEC MALLOCATTR void * nedmalloc(size_t size) THROWSPEC; +EXTSPEC MALLOCATTR void * nedcalloc(size_t no, size_t size) THROWSPEC; +EXTSPEC MALLOCATTR void * nedrealloc(void *mem, size_t size) THROWSPEC; +EXTSPEC void nedfree(void *mem) THROWSPEC; +EXTSPEC MALLOCATTR void * nedmemalign(size_t alignment, size_t bytes) THROWSPEC; +#if !NO_MALLINFO +EXTSPEC struct mallinfo nedmallinfo(void) THROWSPEC; +#endif +EXTSPEC int nedmallopt(int parno, int value) THROWSPEC; +EXTSPEC int nedmalloc_trim(size_t pad) THROWSPEC; +EXTSPEC void nedmalloc_stats(void) THROWSPEC; +EXTSPEC size_t nedmalloc_footprint(void) THROWSPEC; +EXTSPEC MALLOCATTR void **nedindependent_calloc(size_t elemsno, size_t elemsize, void **chunks) THROWSPEC; +EXTSPEC MALLOCATTR void **nedindependent_comalloc(size_t elems, size_t *sizes, void **chunks) THROWSPEC; + +/* These are the pool functions */ +struct nedpool_t; +typedef struct nedpool_t nedpool; + +/* Creates a memory pool for use with the nedp* functions below. +Capacity is how much to allocate immediately (if you know you'll be allocating a lot +of memory very soon) which you can leave at zero. Threads specifies how many threads +will *normally* be accessing the pool concurrently. Setting this to zero means it +extends on demand, but be careful of this as it can rapidly consume system resources +where bursts of concurrent threads use a pool at once. +*/ +EXTSPEC MALLOCATTR nedpool *nedcreatepool(size_t capacity, int threads) THROWSPEC; + +/* Destroys a memory pool previously created by nedcreatepool(). +*/ +EXTSPEC void neddestroypool(nedpool *p) THROWSPEC; + +/* Sets a value to be associated with a pool. You can retrieve this value by passing +any memory block allocated from that pool. +*/ +EXTSPEC void nedpsetvalue(nedpool *p, void *v) THROWSPEC; +/* Gets a previously set value using nedpsetvalue() or zero if memory is unknown. +Optionally can also retrieve pool. +*/ +EXTSPEC void *nedgetvalue(nedpool **p, void *mem) THROWSPEC; + +/* Disables the thread cache for the calling thread, returning any existing cache +data to the central pool. 
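+
+A minimal pool lifecycle, for illustration only (hypothetical caller code,
+error handling elided):
+
+    nedpool *pool = nedcreatepool(0, 4);  -- expect roughly 4 threads
+    void *mem = nedpmalloc(pool, 256);
+    nedpfree(pool, mem);
+    neddisablethreadcache(pool);          -- in each thread before it exits
+    neddestroypool(pool);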
+*/ +EXTSPEC void neddisablethreadcache(nedpool *p) THROWSPEC; + +EXTSPEC MALLOCATTR void * nedpmalloc(nedpool *p, size_t size) THROWSPEC; +EXTSPEC MALLOCATTR void * nedpcalloc(nedpool *p, size_t no, size_t size) THROWSPEC; +EXTSPEC MALLOCATTR void * nedprealloc(nedpool *p, void *mem, size_t size) THROWSPEC; +EXTSPEC void nedpfree(nedpool *p, void *mem) THROWSPEC; +EXTSPEC MALLOCATTR void * nedpmemalign(nedpool *p, size_t alignment, size_t bytes) THROWSPEC; +#if !NO_MALLINFO +EXTSPEC struct mallinfo nedpmallinfo(nedpool *p) THROWSPEC; +#endif +EXTSPEC int nedpmallopt(nedpool *p, int parno, int value) THROWSPEC; +EXTSPEC int nedpmalloc_trim(nedpool *p, size_t pad) THROWSPEC; +EXTSPEC void nedpmalloc_stats(nedpool *p) THROWSPEC; +EXTSPEC size_t nedpmalloc_footprint(nedpool *p) THROWSPEC; +EXTSPEC MALLOCATTR void **nedpindependent_calloc(nedpool *p, size_t elemsno, size_t elemsize, void **chunks) THROWSPEC; +EXTSPEC MALLOCATTR void **nedpindependent_comalloc(nedpool *p, size_t elems, size_t *sizes, void **chunks) THROWSPEC; + +#if defined(__cplusplus) +} +#endif + +#undef MALLOCATTR +#undef EXTSPEC + +#endif diff --git a/compat/obstack.c b/compat/obstack.c new file mode 100644 index 0000000000..e276ccd7b3 --- /dev/null +++ b/compat/obstack.c @@ -0,0 +1,413 @@ +/* obstack.c - subroutines used implicitly by object stack macros + Copyright (C) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, + 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +#include "git-compat-util.h" +#include <gettext.h> +#include "obstack.h" + +/* NOTE BEFORE MODIFYING THIS FILE: This version number must be + incremented whenever callers compiled using an old obstack.h can no + longer properly call the functions in this obstack.c. */ +#define OBSTACK_INTERFACE_VERSION 1 + +/* Comment out all this code if we are using the GNU C Library, and are not + actually compiling the library itself, and the installed library + supports the same library interface we do. This code is part of the GNU + C Library, but also included in many other GNU distributions. Compiling + and linking in this code is a waste when using the GNU C library + (especially if it is a shared library). Rather than having every GNU + program understand `configure --with-gnu-libc' and omit the object + files, it is simpler to just do this in the source for each such file. */ + +#include <stdio.h> /* Random thing to get __GNU_LIBRARY__. 
*/ +#if !defined _LIBC && defined __GNU_LIBRARY__ && __GNU_LIBRARY__ > 1 +# include <gnu-versions.h> +# if _GNU_OBSTACK_INTERFACE_VERSION == OBSTACK_INTERFACE_VERSION +# define ELIDE_CODE +# endif +#endif + +#include <stddef.h> + +#ifndef ELIDE_CODE + + +# if HAVE_INTTYPES_H +# include <inttypes.h> +# endif +# if HAVE_STDINT_H || defined _LIBC +# include <stdint.h> +# endif + +/* Determine default alignment. */ +union fooround +{ + uintmax_t i; + long double d; + void *p; +}; +struct fooalign +{ + char c; + union fooround u; +}; +/* If malloc were really smart, it would round addresses to DEFAULT_ALIGNMENT. + But in fact it might be less smart and round addresses to as much as + DEFAULT_ROUNDING. So we prepare for it to do that. */ +enum + { + DEFAULT_ALIGNMENT = offsetof (struct fooalign, u), + DEFAULT_ROUNDING = sizeof (union fooround) + }; + +/* When we copy a long block of data, this is the unit to do it with. + On some machines, copying successive ints does not work; + in such a case, redefine COPYING_UNIT to `long' (if that works) + or `char' as a last resort. */ +# ifndef COPYING_UNIT +# define COPYING_UNIT int +# endif + + +/* The functions allocating more room by calling `obstack_chunk_alloc' + jump to the handler pointed to by `obstack_alloc_failed_handler'. + This can be set to a user defined function which should either + abort gracefully or use longjump - but shouldn't return. This + variable by default points to the internal function + `print_and_abort'. */ +static void print_and_abort (void); +void (*obstack_alloc_failed_handler) (void) = print_and_abort; + +# ifdef _LIBC +# if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4) +/* A looong time ago (before 1994, anyway; we're not sure) this global variable + was used by non-GNU-C macros to avoid multiple evaluation. The GNU C + library still exports it because somebody might use it. */ +struct obstack *_obstack_compat; +compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0); +# endif +# endif + +/* Define a macro that either calls functions with the traditional malloc/free + calling interface, or calls functions with the mmalloc/mfree interface + (that adds an extra first argument), based on the state of use_extra_arg. + For free, do not use ?:, since some compilers, like the MIPS compilers, + do not allow (expr) ? void : void. */ + +# define CALL_CHUNKFUN(h, size) \ + (((h) -> use_extra_arg) \ + ? (*(h)->chunkfun) ((h)->extra_arg, (size)) \ + : (*(struct _obstack_chunk *(*) (long)) (h)->chunkfun) ((size))) + +# define CALL_FREEFUN(h, old_chunk) \ + do { \ + if ((h) -> use_extra_arg) \ + (*(h)->freefun) ((h)->extra_arg, (old_chunk)); \ + else \ + (*(void (*) (void *)) (h)->freefun) ((old_chunk)); \ + } while (0) + + +/* Initialize an obstack H for use. Specify chunk size SIZE (0 means default). + Objects start on multiples of ALIGNMENT (0 means use default). + CHUNKFUN is the function to use to allocate chunks, + and FREEFUN the function to free them. + + Return nonzero if successful, calls obstack_alloc_failed_handler if + allocation fails. */ + +int +_obstack_begin (struct obstack *h, + int size, int alignment, + void *(*chunkfun) (long), + void (*freefun) (void *)) +{ + register struct _obstack_chunk *chunk; /* points to new chunk */ + + if (alignment == 0) + alignment = DEFAULT_ALIGNMENT; + if (size == 0) + /* Default size is what GNU malloc can fit in a 4096-byte block. */ + { + /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc. 
+	 Use the values for range checking, because if range checking is off,
+	 the extra bytes won't be missed terribly, but if range checking is on
+	 and we used a larger request, a whole extra 4096 bytes would be
+	 allocated.
+
+	 These numbers are irrelevant to the new GNU malloc.  I suspect it is
+	 less sensitive to the size of the request.  */
+      int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
+		    + 4 + DEFAULT_ROUNDING - 1)
+		   & ~(DEFAULT_ROUNDING - 1));
+      size = 4096 - extra;
+    }
+
+  h->chunkfun = (struct _obstack_chunk * (*)(void *, long)) chunkfun;
+  h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
+  h->chunk_size = size;
+  h->alignment_mask = alignment - 1;
+  h->use_extra_arg = 0;
+
+  chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
+  if (!chunk)
+    (*obstack_alloc_failed_handler) ();
+  h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
+					       alignment - 1);
+  h->chunk_limit = chunk->limit
+    = (char *) chunk + h->chunk_size;
+  chunk->prev = NULL;
+  /* The initial chunk now contains no empty object.  */
+  h->maybe_empty_object = 0;
+  h->alloc_failed = 0;
+  return 1;
+}
+
+int
+_obstack_begin_1 (struct obstack *h, int size, int alignment,
+		  void *(*chunkfun) (void *, long),
+		  void (*freefun) (void *, void *),
+		  void *arg)
+{
+  register struct _obstack_chunk *chunk; /* points to new chunk */
+
+  if (alignment == 0)
+    alignment = DEFAULT_ALIGNMENT;
+  if (size == 0)
+    /* Default size is what GNU malloc can fit in a 4096-byte block.  */
+    {
+      /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc.
+	 Use the values for range checking, because if range checking is off,
+	 the extra bytes won't be missed terribly, but if range checking is on
+	 and we used a larger request, a whole extra 4096 bytes would be
+	 allocated.
+
+	 These numbers are irrelevant to the new GNU malloc.  I suspect it is
+	 less sensitive to the size of the request.  */
+      int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
+		    + 4 + DEFAULT_ROUNDING - 1)
+		   & ~(DEFAULT_ROUNDING - 1));
+      size = 4096 - extra;
+    }
+
+  h->chunkfun = (struct _obstack_chunk * (*)(void *,long)) chunkfun;
+  h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
+  h->chunk_size = size;
+  h->alignment_mask = alignment - 1;
+  h->extra_arg = arg;
+  h->use_extra_arg = 1;
+
+  chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
+  if (!chunk)
+    (*obstack_alloc_failed_handler) ();
+  h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
+					       alignment - 1);
+  h->chunk_limit = chunk->limit
+    = (char *) chunk + h->chunk_size;
+  chunk->prev = NULL;
+  /* The initial chunk now contains no empty object.  */
+  h->maybe_empty_object = 0;
+  h->alloc_failed = 0;
+  return 1;
+}
+
+/* Allocate a new current chunk for the obstack *H
+   on the assumption that LENGTH bytes need to be added
+   to the current object, or a new object of length LENGTH allocated.
+   Copies any partial object from the end of the old chunk
+   to the beginning of the new one.  */
+
+void
+_obstack_newchunk (struct obstack *h, int length)
+{
+  register struct _obstack_chunk *old_chunk = h->chunk;
+  register struct _obstack_chunk *new_chunk;
+  register long new_size;
+  register long obj_size = h->next_free - h->object_base;
+  register long i;
+  long already;
+  char *object_base;
+
+  /* Compute size for new chunk.  */
+  new_size = (obj_size + length) + (obj_size >> 3) + h->alignment_mask + 100;
+  if (new_size < h->chunk_size)
+    new_size = h->chunk_size;
+
+  /* Allocate and initialize the new chunk. 
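+     (Worked example of the size computation above, for illustration: a
+     1000-byte partial object being grown by 200 bytes with alignment_mask 7
+     gives new_size = (1000 + 200) + (1000 >> 3) + 7 + 100 = 1432, which is
+     then raised to chunk_size when that is larger.) 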
*/
+  new_chunk = CALL_CHUNKFUN (h, new_size);
+  if (!new_chunk)
+    (*obstack_alloc_failed_handler) ();
+  h->chunk = new_chunk;
+  new_chunk->prev = old_chunk;
+  new_chunk->limit = h->chunk_limit = (char *) new_chunk + new_size;
+
+  /* Compute an aligned object_base in the new chunk */
+  object_base =
+    __PTR_ALIGN ((char *) new_chunk, new_chunk->contents, h->alignment_mask);
+
+  /* Move the existing object to the new chunk.
+     Word at a time is fast and is safe if the object
+     is sufficiently aligned.  */
+  if (h->alignment_mask + 1 >= DEFAULT_ALIGNMENT)
+    {
+      for (i = obj_size / sizeof (COPYING_UNIT) - 1;
+	   i >= 0; i--)
+	((COPYING_UNIT *)object_base)[i]
+	  = ((COPYING_UNIT *)h->object_base)[i];
+      /* We used to copy the odd few remaining bytes as one extra COPYING_UNIT,
+	 but that can cross a page boundary on a machine
+	 which does not do strict alignment for COPYING_UNITS.  */
+      already = obj_size / sizeof (COPYING_UNIT) * sizeof (COPYING_UNIT);
+    }
+  else
+    already = 0;
+  /* Copy remaining bytes one by one.  */
+  for (i = already; i < obj_size; i++)
+    object_base[i] = h->object_base[i];
+
+  /* If the object just copied was the only data in OLD_CHUNK,
+     free that chunk and remove it from the chain.
+     But not if that chunk might contain an empty object.  */
+  if (! h->maybe_empty_object
+      && (h->object_base
+	  == __PTR_ALIGN ((char *) old_chunk, old_chunk->contents,
+			  h->alignment_mask)))
+    {
+      new_chunk->prev = old_chunk->prev;
+      CALL_FREEFUN (h, old_chunk);
+    }
+
+  h->object_base = object_base;
+  h->next_free = h->object_base + obj_size;
+  /* The new chunk certainly contains no empty object yet.  */
+  h->maybe_empty_object = 0;
+}
+# ifdef _LIBC
+libc_hidden_def (_obstack_newchunk)
+# endif
+
+/* Return nonzero if object OBJ has been allocated from obstack H.
+   This is here for debugging.
+   If you use it in a program, you are probably losing.  */
+
+/* Suppress -Wmissing-prototypes warning.  We don't want to declare this in
+   obstack.h because it is just for debugging.  */
+int _obstack_allocated_p (struct obstack *h, void *obj);
+
+int
+_obstack_allocated_p (struct obstack *h, void *obj)
+{
+  register struct _obstack_chunk *lp;	/* below addr of any objects in this chunk */
+  register struct _obstack_chunk *plp;	/* point to previous chunk if any */
+
+  lp = (h)->chunk;
+  /* We use >= rather than > since the object cannot be exactly at
+     the beginning of the chunk but might be an empty object exactly
+     at the end of an adjacent chunk.  */
+  while (lp != NULL && ((void *) lp >= obj || (void *) (lp)->limit < obj))
+    {
+      plp = lp->prev;
+      lp = plp;
+    }
+  return lp != NULL;
+}
+
+/* Free objects in obstack H, including OBJ and everything allocated
+   more recently than OBJ.  If OBJ is zero, free everything in H.  */
+
+# undef obstack_free
+
+void
+obstack_free (struct obstack *h, void *obj)
+{
+  register struct _obstack_chunk *lp;	/* below addr of any objects in this chunk */
+  register struct _obstack_chunk *plp;	/* point to previous chunk if any */
+
+  lp = h->chunk;
+  /* We use >= because there cannot be an object at the beginning of a chunk.
+     But there can be an empty object at that address
+     at the end of another chunk.  */
+  while (lp != NULL && ((void *) lp >= obj || (void *) (lp)->limit < obj))
+    {
+      plp = lp->prev;
+      CALL_FREEFUN (h, lp);
+      lp = plp;
+      /* If we switch chunks, we can't tell whether the new current
+	 chunk contains an empty object, so assume that it may. 
*/ + h->maybe_empty_object = 1; + } + if (lp) + { + h->object_base = h->next_free = (char *) (obj); + h->chunk_limit = lp->limit; + h->chunk = lp; + } + else if (obj != NULL) + /* obj is not in any of the chunks! */ + abort (); +} + +# ifdef _LIBC +/* Older versions of libc used a function _obstack_free intended to be + called by non-GCC compilers. */ +strong_alias (obstack_free, _obstack_free) +# endif + +int +_obstack_memory_used (struct obstack *h) +{ + register struct _obstack_chunk* lp; + register int nbytes = 0; + + for (lp = h->chunk; lp != NULL; lp = lp->prev) + { + nbytes += lp->limit - (char *) lp; + } + return nbytes; +} + +# ifdef _LIBC +# include <libio/iolibio.h> +# endif + +# ifndef __attribute__ +/* This feature is available in gcc versions 2.5 and later. */ +# if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 5) +# define __attribute__(Spec) /* empty */ +# endif +# endif + +static void +print_and_abort (void) +{ + /* Don't change any of these strings. Yes, it would be possible to add + the newline to the string and use fputs or so. But this must not + happen because the "memory exhausted" message appears in other places + like this and the translation should be reused instead of creating + a very similar string which requires a separate translation. */ +# ifdef _LIBC + (void) __fxprintf (NULL, "%s\n", _("memory exhausted")); +# else + fprintf (stderr, "%s\n", _("memory exhausted")); +# endif + exit (1); +} + +#endif /* !ELIDE_CODE */ diff --git a/compat/obstack.h b/compat/obstack.h new file mode 100644 index 0000000000..ceb4bdbcdd --- /dev/null +++ b/compat/obstack.h @@ -0,0 +1,506 @@ +/* obstack.h - object stack macros + Copyright (C) 1988-1994,1996-1999,2003,2004,2005,2009 + Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. */ + +/* Summary: + +All the apparent functions defined here are macros. The idea +is that you would use these pre-tested macros to solve a +very specific set of problems, and they would run fast. +Caution: no side-effects in arguments please!! They may be +evaluated MANY times!! + +These macros operate a stack of objects. Each object starts life +small, and may grow to maturity. (Consider building a word syllable +by syllable.) An object can move while it is growing. Once it has +been "finished" it never changes address again. So the "top of the +stack" is typically an immature growing object, while the rest of the +stack is of mature, fixed size and fixed address objects. + +These routines grab large chunks of memory, using a function you +supply, called `obstack_chunk_alloc'. On occasion, they free chunks, +by calling `obstack_chunk_free'. You must define them and declare +them before using any obstack macros. + +Each independent stack is represented by a `struct obstack'. 
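+
+(A minimal usage sketch, for illustration; `xmalloc' stands for whatever
+allocation wrapper the client supplies and is not provided here:
+
+    #define obstack_chunk_alloc xmalloc
+    #define obstack_chunk_free free
+
+    struct obstack ob;
+    obstack_init (&ob);
+    obstack_grow (&ob, "hel", 3);
+    obstack_grow0 (&ob, "lo", 2);
+    char *s = obstack_finish (&ob);
+
+grows the string "hello" in place, and once the object is finished, s
+points at the completed NUL-terminated result.)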
+Each of the obstack macros expects a pointer to such a structure +as the first argument. + +One motivation for this package is the problem of growing char strings +in symbol tables. Unless you are "fascist pig with a read-only mind" +--Gosper's immortal quote from HAKMEM item 154, out of context--you +would not like to put any arbitrary upper limit on the length of your +symbols. + +In practice this often means you will build many short symbols and a +few long symbols. At the time you are reading a symbol you don't know +how long it is. One traditional method is to read a symbol into a +buffer, realloc()ating the buffer every time you try to read a symbol +that is longer than the buffer. This is beaut, but you still will +want to copy the symbol from the buffer to a more permanent +symbol-table entry say about half the time. + +With obstacks, you can work differently. Use one obstack for all symbol +names. As you read a symbol, grow the name in the obstack gradually. +When the name is complete, finalize it. Then, if the symbol exists already, +free the newly read name. + +The way we do this is to take a large chunk, allocating memory from +low addresses. When you want to build a symbol in the chunk you just +add chars above the current "high water mark" in the chunk. When you +have finished adding chars, because you got to the end of the symbol, +you know how long the chars are, and you can create a new object. +Mostly the chars will not burst over the highest address of the chunk, +because you would typically expect a chunk to be (say) 100 times as +long as an average object. + +In case that isn't clear, when we have enough chars to make up +the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed) +so we just point to it where it lies. No moving of chars is +needed and this is the second win: potentially long strings need +never be explicitly shuffled. Once an object is formed, it does not +change its address during its lifetime. + +When the chars burst over a chunk boundary, we allocate a larger +chunk, and then copy the partly formed object from the end of the old +chunk to the beginning of the new larger chunk. We then carry on +accreting characters to the end of the object as we normally would. + +A special macro is provided to add a single char at a time to a +growing object. This allows the use of register variables, which +break the ordinary 'growth' macro. + +Summary: + We allocate large chunks. + We carve out one object at a time from the current chunk. + Once carved, an object never moves. + We are free to append data of any size to the currently + growing object. + Exactly one object is growing in an obstack at any one time. + You can run one obstack per control block. + You may have as many control blocks as you dare. + Because of the way we do it, you can `unwind' an obstack + back to a previous state. (You may remove objects much + as you would with a stack.) +*/ + + +/* Don't do the contents of this file more than once. */ + +#ifndef _OBSTACK_H +#define _OBSTACK_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* We need the type of a pointer subtraction. If __PTRDIFF_TYPE__ is + defined, as with GNU C, use that; that way we don't pollute the + namespace with <stddef.h>'s symbols. Otherwise, include <stddef.h> + and use ptrdiff_t. 
*/ + +#ifdef __PTRDIFF_TYPE__ +# define PTR_INT_TYPE __PTRDIFF_TYPE__ +#else +# include <stddef.h> +# define PTR_INT_TYPE ptrdiff_t +#endif + +/* If B is the base of an object addressed by P, return the result of + aligning P to the next multiple of A + 1. B and P must be of type + char *. A + 1 must be a power of 2. */ + +#define __BPTR_ALIGN(B, P, A) ((B) + (((P) - (B) + (A)) & ~(A))) + +/* Similar to _BPTR_ALIGN (B, P, A), except optimize the common case + where pointers can be converted to integers, aligned as integers, + and converted back again. If PTR_INT_TYPE is narrower than a + pointer (e.g., the AS/400), play it safe and compute the alignment + relative to B. Otherwise, use the faster strategy of computing the + alignment relative to 0. */ + +#define __PTR_ALIGN(B, P, A) \ + __BPTR_ALIGN (sizeof (PTR_INT_TYPE) < sizeof (void *) ? (B) : (char *) 0, \ + P, A) + +#include <string.h> + +struct _obstack_chunk /* Lives at front of each chunk. */ +{ + char *limit; /* 1 past end of this chunk */ + struct _obstack_chunk *prev; /* address of prior chunk or NULL */ + char contents[4]; /* objects begin here */ +}; + +struct obstack /* control current object in current chunk */ +{ + long chunk_size; /* preferred size to allocate chunks in */ + struct _obstack_chunk *chunk; /* address of current struct obstack_chunk */ + char *object_base; /* address of object we are building */ + char *next_free; /* where to add next char to current object */ + char *chunk_limit; /* address of char after current chunk */ + union + { + PTR_INT_TYPE tempint; + void *tempptr; + } temp; /* Temporary for some macros. */ + int alignment_mask; /* Mask of alignment for each object. */ + /* These prototypes vary based on `use_extra_arg', and we use + casts to the prototypeless function type in all assignments, + but having prototypes here quiets -Wstrict-prototypes. */ + struct _obstack_chunk *(*chunkfun) (void *, long); + void (*freefun) (void *, struct _obstack_chunk *); + void *extra_arg; /* first arg for chunk alloc/dealloc funcs */ + unsigned use_extra_arg:1; /* chunk alloc/dealloc funcs take extra arg */ + unsigned maybe_empty_object:1;/* There is a possibility that the current + chunk contains a zero-length object. This + prevents freeing the chunk if we allocate + a bigger chunk to replace it. */ + unsigned alloc_failed:1; /* No longer used, as we now call the failed + handler on error, but retained for binary + compatibility. */ +}; + +/* Declare the external functions we use; they are in obstack.c. */ + +extern void _obstack_newchunk (struct obstack *, int); +extern int _obstack_begin (struct obstack *, int, int, + void *(*) (long), void (*) (void *)); +extern int _obstack_begin_1 (struct obstack *, int, int, + void *(*) (void *, long), + void (*) (void *, void *), void *); +extern int _obstack_memory_used (struct obstack *); + +void obstack_free (struct obstack *, void *); + + +/* Error handler called when `obstack_chunk_alloc' failed to allocate + more memory. This can be set to a user defined function which + should either abort gracefully or use longjump - but shouldn't + return. The default action is to print a message and abort. */ +extern void (*obstack_alloc_failed_handler) (void); + +/* Pointer to beginning of object being allocated or to be allocated next. + Note that this might not be the final address of the object + because a new chunk might be needed to hold the final size. */ + +#define obstack_base(h) ((void *) (h)->object_base) + +/* Size for allocating ordinary chunks. 
*/ + +#define obstack_chunk_size(h) ((h)->chunk_size) + +/* Pointer to next byte not yet allocated in current chunk. */ + +#define obstack_next_free(h) ((h)->next_free) + +/* Mask specifying low bits that should be clear in address of an object. */ + +#define obstack_alignment_mask(h) ((h)->alignment_mask) + +/* To prevent prototype warnings provide complete argument list. */ +#define obstack_init(h) \ + _obstack_begin ((h), 0, 0, \ + (void *(*) (long)) obstack_chunk_alloc, \ + (void (*) (void *)) obstack_chunk_free) + +#define obstack_begin(h, size) \ + _obstack_begin ((h), (size), 0, \ + (void *(*) (long)) obstack_chunk_alloc, \ + (void (*) (void *)) obstack_chunk_free) + +#define obstack_specify_allocation(h, size, alignment, chunkfun, freefun) \ + _obstack_begin ((h), (size), (alignment), \ + (void *(*) (long)) (chunkfun), \ + (void (*) (void *)) (freefun)) + +#define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \ + _obstack_begin_1 ((h), (size), (alignment), \ + (void *(*) (void *, long)) (chunkfun), \ + (void (*) (void *, void *)) (freefun), (arg)) + +#define obstack_chunkfun(h, newchunkfun) \ + ((h) -> chunkfun = (struct _obstack_chunk *(*)(void *, long)) (newchunkfun)) + +#define obstack_freefun(h, newfreefun) \ + ((h) -> freefun = (void (*)(void *, struct _obstack_chunk *)) (newfreefun)) + +#define obstack_1grow_fast(h,achar) (*((h)->next_free)++ = (achar)) + +#define obstack_blank_fast(h,n) ((h)->next_free += (n)) + +#define obstack_memory_used(h) _obstack_memory_used (h) + +#if defined __GNUC__ && defined __STDC__ && __STDC__ +/* NextStep 2.0 cc is really gcc 1.93 but it defines __GNUC__ = 2 and + does not implement __extension__. But that compiler doesn't define + __GNUC_MINOR__. */ +# if __GNUC__ < 2 || (__NeXT__ && !__GNUC_MINOR__) +# define __extension__ +# endif + +/* For GNU C, if not -traditional, + we can define these macros to compute all args only once + without using a global variable. + Also, we can avoid using the `temp' slot, to make faster code. 
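+
+   First, though, a hedged sketch of the custom-allocator entry point
+   declared above: obstack_specify_allocation() lets a caller supply
+   its own chunk functions.  counting_alloc and counting_free are
+   illustrative names, not part of this header.  */
+
+#if 0	/* illustration only */
+#include <stdio.h>
+#include <stdlib.h>
+#include "obstack.h"
+
+static long live_chunks;
+
+static void *counting_alloc(long size)
+{
+	live_chunks++;
+	return malloc(size);
+}
+
+static void counting_free(void *chunk)
+{
+	live_chunks--;
+	free(chunk);
+}
+
+int main(void)
+{
+	struct obstack ob;
+
+	/* 4096-byte chunks, default alignment, our chunk functions. */
+	obstack_specify_allocation(&ob, 4096, 0, counting_alloc, counting_free);
+	(void) obstack_alloc(&ob, 100);
+	obstack_free(&ob, NULL);	/* hands every chunk back */
+	printf("leaked chunks: %ld\n", live_chunks);	/* prints 0 */
+	return 0;
+}
+#endif
+
+/*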
*/ + +# define obstack_object_size(OBSTACK) \ + __extension__ \ + ({ struct obstack const *__o = (OBSTACK); \ + (unsigned) (__o->next_free - __o->object_base); }) + +# define obstack_room(OBSTACK) \ + __extension__ \ + ({ struct obstack const *__o = (OBSTACK); \ + (unsigned) (__o->chunk_limit - __o->next_free); }) + +# define obstack_make_room(OBSTACK,length) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + int __len = (length); \ + if (__o->chunk_limit - __o->next_free < __len) \ + _obstack_newchunk (__o, __len); \ + (void) 0; }) + +# define obstack_empty_p(OBSTACK) \ + __extension__ \ + ({ struct obstack const *__o = (OBSTACK); \ + (__o->chunk->prev == 0 \ + && __o->next_free == __PTR_ALIGN ((char *) __o->chunk, \ + __o->chunk->contents, \ + __o->alignment_mask)); }) + +# define obstack_grow(OBSTACK,where,length) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + int __len = (length); \ + if (__o->next_free + __len > __o->chunk_limit) \ + _obstack_newchunk (__o, __len); \ + memcpy (__o->next_free, where, __len); \ + __o->next_free += __len; \ + (void) 0; }) + +# define obstack_grow0(OBSTACK,where,length) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + int __len = (length); \ + if (__o->next_free + __len + 1 > __o->chunk_limit) \ + _obstack_newchunk (__o, __len + 1); \ + memcpy (__o->next_free, where, __len); \ + __o->next_free += __len; \ + *(__o->next_free)++ = 0; \ + (void) 0; }) + +# define obstack_1grow(OBSTACK,datum) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + if (__o->next_free + 1 > __o->chunk_limit) \ + _obstack_newchunk (__o, 1); \ + obstack_1grow_fast (__o, datum); \ + (void) 0; }) + +/* These assume that the obstack alignment is good enough for pointers + or ints, and that the data added so far to the current object + shares that much alignment. 
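+
+   The growth macros above combine naturally.  A minimal sketch that
+   assembles one string from several pieces, assuming the usual
+   malloc/free chunk functions:  */
+
+#if 0	/* illustration only */
+#include <stdio.h>
+#include <stdlib.h>
+#include "obstack.h"
+
+#define obstack_chunk_alloc malloc
+#define obstack_chunk_free free
+
+int main(void)
+{
+	struct obstack ob;
+	char *path;
+
+	obstack_init(&ob);
+	obstack_grow(&ob, "compat", 6);	/* object still open: no NUL yet */
+	obstack_1grow(&ob, '/');
+	obstack_grow0(&ob, "poll", 4);	/* appends the bytes plus a NUL */
+	path = obstack_finish(&ob);	/* "compat/poll", at a fixed address */
+	printf("%s\n", path);
+	obstack_free(&ob, NULL);
+	return 0;
+}
+#endif
+
+/*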
*/ + +# define obstack_ptr_grow(OBSTACK,datum) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + if (__o->next_free + sizeof (void *) > __o->chunk_limit) \ + _obstack_newchunk (__o, sizeof (void *)); \ + obstack_ptr_grow_fast (__o, datum); }) \ + +# define obstack_int_grow(OBSTACK,datum) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + if (__o->next_free + sizeof (int) > __o->chunk_limit) \ + _obstack_newchunk (__o, sizeof (int)); \ + obstack_int_grow_fast (__o, datum); }) + +# define obstack_ptr_grow_fast(OBSTACK,aptr) \ +__extension__ \ +({ struct obstack *__o1 = (OBSTACK); \ + *(const void **) __o1->next_free = (aptr); \ + __o1->next_free += sizeof (const void *); \ + (void) 0; }) + +# define obstack_int_grow_fast(OBSTACK,aint) \ +__extension__ \ +({ struct obstack *__o1 = (OBSTACK); \ + *(int *) __o1->next_free = (aint); \ + __o1->next_free += sizeof (int); \ + (void) 0; }) + +# define obstack_blank(OBSTACK,length) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + int __len = (length); \ + if (__o->chunk_limit - __o->next_free < __len) \ + _obstack_newchunk (__o, __len); \ + obstack_blank_fast (__o, __len); \ + (void) 0; }) + +# define obstack_alloc(OBSTACK,length) \ +__extension__ \ +({ struct obstack *__h = (OBSTACK); \ + obstack_blank (__h, (length)); \ + obstack_finish (__h); }) + +# define obstack_copy(OBSTACK,where,length) \ +__extension__ \ +({ struct obstack *__h = (OBSTACK); \ + obstack_grow (__h, (where), (length)); \ + obstack_finish (__h); }) + +# define obstack_copy0(OBSTACK,where,length) \ +__extension__ \ +({ struct obstack *__h = (OBSTACK); \ + obstack_grow0 (__h, (where), (length)); \ + obstack_finish (__h); }) + +/* The local variable is named __o1 to avoid a name conflict + when obstack_blank is called. */ +# define obstack_finish(OBSTACK) \ +__extension__ \ +({ struct obstack *__o1 = (OBSTACK); \ + void *__value = (void *) __o1->object_base; \ + if (__o1->next_free == __value) \ + __o1->maybe_empty_object = 1; \ + __o1->next_free \ + = __PTR_ALIGN (__o1->object_base, __o1->next_free, \ + __o1->alignment_mask); \ + if (__o1->next_free - (char *)__o1->chunk \ + > __o1->chunk_limit - (char *)__o1->chunk) \ + __o1->next_free = __o1->chunk_limit; \ + __o1->object_base = __o1->next_free; \ + __value; }) + +# define obstack_free(OBSTACK, OBJ) \ +__extension__ \ +({ struct obstack *__o = (OBSTACK); \ + void *__obj = (OBJ); \ + if (__obj > (void *)__o->chunk && __obj < (void *)__o->chunk_limit) \ + __o->next_free = __o->object_base = (char *)__obj; \ + else (obstack_free) (__o, __obj); }) + +#else /* not __GNUC__ or not __STDC__ */ + +# define obstack_object_size(h) \ + (unsigned) ((h)->next_free - (h)->object_base) + +# define obstack_room(h) \ + (unsigned) ((h)->chunk_limit - (h)->next_free) + +# define obstack_empty_p(h) \ + ((h)->chunk->prev == 0 \ + && (h)->next_free == __PTR_ALIGN ((char *) (h)->chunk, \ + (h)->chunk->contents, \ + (h)->alignment_mask)) + +/* Note that the call to _obstack_newchunk is enclosed in (..., 0) + so that we can avoid having void expressions + in the arms of the conditional expression. + Casting the third operand to void was tried before, + but some compilers won't accept it. */ + +# define obstack_make_room(h,length) \ +( (h)->temp.tempint = (length), \ + (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0)) + +# define obstack_grow(h,where,length) \ +( (h)->temp.tempint = (length), \ + (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit) \ + ? 
(_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0), \ + memcpy ((h)->next_free, where, (h)->temp.tempint), \ + (h)->next_free += (h)->temp.tempint) + +# define obstack_grow0(h,where,length) \ +( (h)->temp.tempint = (length), \ + (((h)->next_free + (h)->temp.tempint + 1 > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), (h)->temp.tempint + 1), 0) : 0), \ + memcpy ((h)->next_free, where, (h)->temp.tempint), \ + (h)->next_free += (h)->temp.tempint, \ + *((h)->next_free)++ = 0) + +# define obstack_1grow(h,datum) \ +( (((h)->next_free + 1 > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), 1), 0) : 0), \ + obstack_1grow_fast (h, datum)) + +# define obstack_ptr_grow(h,datum) \ +( (((h)->next_free + sizeof (char *) > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0), \ + obstack_ptr_grow_fast (h, datum)) + +# define obstack_int_grow(h,datum) \ +( (((h)->next_free + sizeof (int) > (h)->chunk_limit) \ + ? (_obstack_newchunk ((h), sizeof (int)), 0) : 0), \ + obstack_int_grow_fast (h, datum)) + +# define obstack_ptr_grow_fast(h,aptr) \ + (((const void **) ((h)->next_free += sizeof (void *)))[-1] = (aptr)) + +# define obstack_int_grow_fast(h,aint) \ + (((int *) ((h)->next_free += sizeof (int)))[-1] = (aint)) + +# define obstack_blank(h,length) \ +( (h)->temp.tempint = (length), \ + (((h)->chunk_limit - (h)->next_free < (h)->temp.tempint) \ + ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0), \ + obstack_blank_fast (h, (h)->temp.tempint)) + +# define obstack_alloc(h,length) \ + (obstack_blank ((h), (length)), obstack_finish ((h))) + +# define obstack_copy(h,where,length) \ + (obstack_grow ((h), (where), (length)), obstack_finish ((h))) + +# define obstack_copy0(h,where,length) \ + (obstack_grow0 ((h), (where), (length)), obstack_finish ((h))) + +# define obstack_finish(h) \ +( ((h)->next_free == (h)->object_base \ + ? (((h)->maybe_empty_object = 1), 0) \ + : 0), \ + (h)->temp.tempptr = (h)->object_base, \ + (h)->next_free \ + = __PTR_ALIGN ((h)->object_base, (h)->next_free, \ + (h)->alignment_mask), \ + (((h)->next_free - (char *) (h)->chunk \ + > (h)->chunk_limit - (char *) (h)->chunk) \ + ? ((h)->next_free = (h)->chunk_limit) : 0), \ + (h)->object_base = (h)->next_free, \ + (h)->temp.tempptr) + +# define obstack_free(h,obj) \ +( (h)->temp.tempint = (char *) (obj) - (char *) (h)->chunk, \ + ((((h)->temp.tempint > 0 \ + && (h)->temp.tempint < (h)->chunk_limit - (char *) (h)->chunk)) \ + ? (int) ((h)->next_free = (h)->object_base \ + = (h)->temp.tempint + (char *) (h)->chunk) \ + : (((obstack_free) ((h), (h)->temp.tempint + (char *) (h)->chunk), 0), 0))) + +#endif /* not __GNUC__ or not __STDC__ */ + +#ifdef __cplusplus +} /* C++ */ +#endif + +#endif /* obstack.h */ diff --git a/compat/poll/poll.c b/compat/poll/poll.c new file mode 100644 index 0000000000..a9b41d89f4 --- /dev/null +++ b/compat/poll/poll.c @@ -0,0 +1,614 @@ +/* Emulation for poll(2) + Contributed by Paolo Bonzini. + + Copyright 2001-2003, 2006-2011 Free Software Foundation, Inc. + + This file is part of gnulib. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, + Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + +/* Tell gcc not to warn about the (nfd < 0) tests, below. */ +#if (__GNUC__ == 4 && 3 <= __GNUC_MINOR__) || 4 < __GNUC__ +# pragma GCC diagnostic ignored "-Wtype-limits" +#endif + +#if defined(WIN32) +# include <malloc.h> +#endif + +#include <sys/types.h> + +/* Specification. */ +#include <poll.h> + +#include <errno.h> +#include <limits.h> +#include <assert.h> + +#if (defined _WIN32 || defined __WIN32__) && ! defined __CYGWIN__ +# define WIN32_NATIVE +# if defined (_MSC_VER) && !defined(_WIN32_WINNT) +# define _WIN32_WINNT 0x0502 +# endif +# include <winsock2.h> +# include <windows.h> +# include <io.h> +# include <stdio.h> +# include <conio.h> +#else +# include <sys/time.h> +# include <sys/socket.h> +# ifndef NO_SYS_SELECT_H +# include <sys/select.h> +# endif +# include <unistd.h> +#endif + +#ifdef HAVE_SYS_IOCTL_H +# include <sys/ioctl.h> +#endif +#ifdef HAVE_SYS_FILIO_H +# include <sys/filio.h> +#endif + +#include <time.h> + +#ifndef INFTIM +# define INFTIM (-1) +#endif + +/* BeOS does not have MSG_PEEK. */ +#ifndef MSG_PEEK +# define MSG_PEEK 0 +#endif + +#ifdef WIN32_NATIVE + +#define IsConsoleHandle(h) (((long) (h) & 3) == 3) + +static BOOL +IsSocketHandle (HANDLE h) +{ + WSANETWORKEVENTS ev; + + if (IsConsoleHandle (h)) + return FALSE; + + /* Under Wine, it seems that getsockopt returns 0 for pipes too. + WSAEnumNetworkEvents instead distinguishes the two correctly. */ + ev.lNetworkEvents = 0xDEADBEEF; + WSAEnumNetworkEvents ((SOCKET) h, NULL, &ev); + return ev.lNetworkEvents != 0xDEADBEEF; +} + +/* Declare data structures for ntdll functions. */ +typedef struct _FILE_PIPE_LOCAL_INFORMATION { + ULONG NamedPipeType; + ULONG NamedPipeConfiguration; + ULONG MaximumInstances; + ULONG CurrentInstances; + ULONG InboundQuota; + ULONG ReadDataAvailable; + ULONG OutboundQuota; + ULONG WriteQuotaAvailable; + ULONG NamedPipeState; + ULONG NamedPipeEnd; +} FILE_PIPE_LOCAL_INFORMATION, *PFILE_PIPE_LOCAL_INFORMATION; + +typedef struct _IO_STATUS_BLOCK +{ + union { + DWORD Status; + PVOID Pointer; + } u; + ULONG_PTR Information; +} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK; + +typedef enum _FILE_INFORMATION_CLASS { + FilePipeLocalInformation = 24 +} FILE_INFORMATION_CLASS, *PFILE_INFORMATION_CLASS; + +typedef DWORD (WINAPI *PNtQueryInformationFile) + (HANDLE, IO_STATUS_BLOCK *, VOID *, ULONG, FILE_INFORMATION_CLASS); + +# ifndef PIPE_BUF +# define PIPE_BUF 512 +# endif + +/* Compute revents values for file handle H. If some events cannot happen + for the handle, eliminate them from *P_SOUGHT. */ + +static int +win32_compute_revents (HANDLE h, int *p_sought) +{ + int i, ret, happened; + INPUT_RECORD *irbuffer; + DWORD avail, nbuffer; + BOOL bRet; + IO_STATUS_BLOCK iosb; + FILE_PIPE_LOCAL_INFORMATION fpli; + static PNtQueryInformationFile NtQueryInformationFile; + static BOOL once_only; + + switch (GetFileType (h)) + { + case FILE_TYPE_PIPE: + if (!once_only) + { + NtQueryInformationFile = (PNtQueryInformationFile) + GetProcAddress (GetModuleHandle ("ntdll.dll"), + "NtQueryInformationFile"); + once_only = TRUE; + } + + happened = 0; + if (PeekNamedPipe (h, NULL, 0, NULL, &avail, NULL) != 0) + { + if (avail) + happened |= *p_sought & (POLLIN | POLLRDNORM); + } + else if (GetLastError () == ERROR_BROKEN_PIPE) + happened |= POLLHUP; + + else + { + /* It was the write-end of the pipe. 
Check if it is writable. + If NtQueryInformationFile fails, optimistically assume the pipe is + writable. This could happen on Win9x, where NtQueryInformationFile + is not available, or if we inherit a pipe that doesn't permit + FILE_READ_ATTRIBUTES access on the write end (I think this should + not happen since WinXP SP2; WINE seems fine too). Otherwise, + ensure that enough space is available for atomic writes. */ + memset (&iosb, 0, sizeof (iosb)); + memset (&fpli, 0, sizeof (fpli)); + + if (!NtQueryInformationFile + || NtQueryInformationFile (h, &iosb, &fpli, sizeof (fpli), + FilePipeLocalInformation) + || fpli.WriteQuotaAvailable >= PIPE_BUF + || (fpli.OutboundQuota < PIPE_BUF && + fpli.WriteQuotaAvailable == fpli.OutboundQuota)) + happened |= *p_sought & (POLLOUT | POLLWRNORM | POLLWRBAND); + } + return happened; + + case FILE_TYPE_CHAR: + ret = WaitForSingleObject (h, 0); + if (!IsConsoleHandle (h)) + return ret == WAIT_OBJECT_0 ? *p_sought & ~(POLLPRI | POLLRDBAND) : 0; + + nbuffer = avail = 0; + bRet = GetNumberOfConsoleInputEvents (h, &nbuffer); + if (bRet) + { + /* Input buffer. */ + *p_sought &= POLLIN | POLLRDNORM; + if (nbuffer == 0) + return POLLHUP; + if (!*p_sought) + return 0; + + irbuffer = (INPUT_RECORD *) alloca (nbuffer * sizeof (INPUT_RECORD)); + bRet = PeekConsoleInput (h, irbuffer, nbuffer, &avail); + if (!bRet || avail == 0) + return POLLHUP; + + for (i = 0; i < avail; i++) + if (irbuffer[i].EventType == KEY_EVENT) + return *p_sought; + return 0; + } + else + { + /* Screen buffer. */ + *p_sought &= POLLOUT | POLLWRNORM | POLLWRBAND; + return *p_sought; + } + + default: + ret = WaitForSingleObject (h, 0); + if (ret == WAIT_OBJECT_0) + return *p_sought & ~(POLLPRI | POLLRDBAND); + + return *p_sought & (POLLOUT | POLLWRNORM | POLLWRBAND); + } +} + +/* Convert fd_sets returned by select into revents values. */ + +static int +win32_compute_revents_socket (SOCKET h, int sought, long lNetworkEvents) +{ + int happened = 0; + + if ((lNetworkEvents & (FD_READ | FD_ACCEPT | FD_CLOSE)) == FD_ACCEPT) + happened |= (POLLIN | POLLRDNORM) & sought; + + else if (lNetworkEvents & (FD_READ | FD_ACCEPT | FD_CLOSE)) + { + int r, error; + + char data[64]; + WSASetLastError (0); + r = recv (h, data, sizeof (data), MSG_PEEK); + error = WSAGetLastError (); + WSASetLastError (0); + + if (r > 0 || error == WSAENOTCONN) + happened |= (POLLIN | POLLRDNORM) & sought; + + /* Distinguish hung-up sockets from other errors. */ + else if (r == 0 || error == WSAESHUTDOWN || error == WSAECONNRESET + || error == WSAECONNABORTED || error == WSAENETRESET) + happened |= POLLHUP; + + else + happened |= POLLERR; + } + + if (lNetworkEvents & (FD_WRITE | FD_CONNECT)) + happened |= (POLLOUT | POLLWRNORM | POLLWRBAND) & sought; + + if (lNetworkEvents & FD_OOB) + happened |= (POLLPRI | POLLRDBAND) & sought; + + return happened; +} + +#else /* !MinGW */ + +/* Convert select(2) returned fd_sets into poll(2) revents values. */ +static int +compute_revents (int fd, int sought, fd_set *rfds, fd_set *wfds, fd_set *efds) +{ + int happened = 0; + if (FD_ISSET (fd, rfds)) + { + int r; + int socket_errno; + +# if defined __MACH__ && defined __APPLE__ + /* There is a bug in Mac OS X that causes it to ignore MSG_PEEK + for some kinds of descriptors. Detect if this descriptor is a + connected socket, a server socket, or something else using a + 0-byte recv, and use ioctl(2) to detect POLLHUP. */ + r = recv (fd, NULL, 0, MSG_PEEK); + socket_errno = (r < 0) ? 
errno : 0; + if (r == 0 || socket_errno == ENOTSOCK) + ioctl (fd, FIONREAD, &r); +# else + char data[64]; + r = recv (fd, data, sizeof (data), MSG_PEEK); + socket_errno = (r < 0) ? errno : 0; +# endif + if (r == 0) + happened |= POLLHUP; + + /* If the event happened on an unconnected server socket, + that's fine. */ + else if (r > 0 || ( /* (r == -1) && */ socket_errno == ENOTCONN)) + happened |= (POLLIN | POLLRDNORM) & sought; + + /* Distinguish hung-up sockets from other errors. */ + else if (socket_errno == ESHUTDOWN || socket_errno == ECONNRESET + || socket_errno == ECONNABORTED || socket_errno == ENETRESET) + happened |= POLLHUP; + + /* some systems can't use recv() on non-socket, including HP NonStop */ + else if (/* (r == -1) && */ socket_errno == ENOTSOCK) + happened |= (POLLIN | POLLRDNORM) & sought; + + else + happened |= POLLERR; + } + + if (FD_ISSET (fd, wfds)) + happened |= (POLLOUT | POLLWRNORM | POLLWRBAND) & sought; + + if (FD_ISSET (fd, efds)) + happened |= (POLLPRI | POLLRDBAND) & sought; + + return happened; +} +#endif /* !MinGW */ + +int +poll (struct pollfd *pfd, nfds_t nfd, int timeout) +{ +#ifndef WIN32_NATIVE + fd_set rfds, wfds, efds; + struct timeval tv; + struct timeval *ptv; + int maxfd, rc; + nfds_t i; + +# ifdef _SC_OPEN_MAX + static int sc_open_max = -1; + + if (nfd < 0 + || (nfd > sc_open_max + && (sc_open_max != -1 + || nfd > (sc_open_max = sysconf (_SC_OPEN_MAX))))) + { + errno = EINVAL; + return -1; + } +# else /* !_SC_OPEN_MAX */ +# ifdef OPEN_MAX + if (nfd < 0 || nfd > OPEN_MAX) + { + errno = EINVAL; + return -1; + } +# endif /* OPEN_MAX -- else, no check is needed */ +# endif /* !_SC_OPEN_MAX */ + + /* EFAULT is not necessary to implement, but let's do it in the + simplest case. */ + if (!pfd && nfd) + { + errno = EFAULT; + return -1; + } + + /* convert timeout number into a timeval structure */ + if (timeout == 0) + { + ptv = &tv; + ptv->tv_sec = 0; + ptv->tv_usec = 0; + } + else if (timeout > 0) + { + ptv = &tv; + ptv->tv_sec = timeout / 1000; + ptv->tv_usec = (timeout % 1000) * 1000; + } + else if (timeout == INFTIM) + /* wait forever */ + ptv = NULL; + else + { + errno = EINVAL; + return -1; + } + + /* create fd sets and determine max fd */ + maxfd = -1; + FD_ZERO (&rfds); + FD_ZERO (&wfds); + FD_ZERO (&efds); + for (i = 0; i < nfd; i++) + { + if (pfd[i].fd < 0) + continue; + + if (pfd[i].events & (POLLIN | POLLRDNORM)) + FD_SET (pfd[i].fd, &rfds); + + /* see select(2): "the only exceptional condition detectable + is out-of-band data received on a socket", hence we push + POLLWRBAND events onto wfds instead of efds. 
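+         The complete mapping used below is thus:
+           POLLIN  | POLLRDNORM               -> rfds
+           POLLOUT | POLLWRNORM | POLLWRBAND  -> wfds
+           POLLPRI | POLLRDBAND               -> efds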
*/ + if (pfd[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND)) + FD_SET (pfd[i].fd, &wfds); + if (pfd[i].events & (POLLPRI | POLLRDBAND)) + FD_SET (pfd[i].fd, &efds); + if (pfd[i].fd >= maxfd + && (pfd[i].events & (POLLIN | POLLOUT | POLLPRI + | POLLRDNORM | POLLRDBAND + | POLLWRNORM | POLLWRBAND))) + { + maxfd = pfd[i].fd; + if (maxfd > FD_SETSIZE) + { + errno = EOVERFLOW; + return -1; + } + } + } + + /* examine fd sets */ + rc = select (maxfd + 1, &rfds, &wfds, &efds, ptv); + if (rc < 0) + return rc; + + /* establish results */ + rc = 0; + for (i = 0; i < nfd; i++) + if (pfd[i].fd < 0) + pfd[i].revents = 0; + else + { + int happened = compute_revents (pfd[i].fd, pfd[i].events, + &rfds, &wfds, &efds); + if (happened) + { + pfd[i].revents = happened; + rc++; + } + } + + return rc; +#else + static struct timeval tv0; + static HANDLE hEvent; + WSANETWORKEVENTS ev; + HANDLE h, handle_array[FD_SETSIZE + 2]; + DWORD ret, wait_timeout, nhandles; + fd_set rfds, wfds, xfds; + BOOL poll_again; + MSG msg; + int rc = 0; + nfds_t i; + + if (nfd < 0 || timeout < -1) + { + errno = EINVAL; + return -1; + } + + if (!hEvent) + hEvent = CreateEvent (NULL, FALSE, FALSE, NULL); + +restart: + handle_array[0] = hEvent; + nhandles = 1; + FD_ZERO (&rfds); + FD_ZERO (&wfds); + FD_ZERO (&xfds); + + /* Classify socket handles and create fd sets. */ + for (i = 0; i < nfd; i++) + { + int sought = pfd[i].events; + pfd[i].revents = 0; + if (pfd[i].fd < 0) + continue; + if (!(sought & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM | POLLWRBAND + | POLLPRI | POLLRDBAND))) + continue; + + h = (HANDLE) _get_osfhandle (pfd[i].fd); + assert (h != NULL); + if (IsSocketHandle (h)) + { + int requested = FD_CLOSE; + + /* see above; socket handles are mapped onto select. */ + if (sought & (POLLIN | POLLRDNORM)) + { + requested |= FD_READ | FD_ACCEPT; + FD_SET ((SOCKET) h, &rfds); + } + if (sought & (POLLOUT | POLLWRNORM | POLLWRBAND)) + { + requested |= FD_WRITE | FD_CONNECT; + FD_SET ((SOCKET) h, &wfds); + } + if (sought & (POLLPRI | POLLRDBAND)) + { + requested |= FD_OOB; + FD_SET ((SOCKET) h, &xfds); + } + + if (requested) + WSAEventSelect ((SOCKET) h, hEvent, requested); + } + else + { + /* Poll now. If we get an event, do not poll again. Also, + screen buffer handles are waitable, and they'll block until + a character is available. win32_compute_revents eliminates + bits for the "wrong" direction. */ + pfd[i].revents = win32_compute_revents (h, &sought); + if (sought) + handle_array[nhandles++] = h; + if (pfd[i].revents) + timeout = 0; + } + } + + if (select (0, &rfds, &wfds, &xfds, &tv0) > 0) + { + /* Do MsgWaitForMultipleObjects anyway to dispatch messages, but + no need to call select again. */ + poll_again = FALSE; + wait_timeout = 0; + } + else + { + poll_again = TRUE; + if (timeout == INFTIM) + wait_timeout = INFINITE; + else + wait_timeout = timeout; + } + + for (;;) + { + ret = MsgWaitForMultipleObjects (nhandles, handle_array, FALSE, + wait_timeout, QS_ALLINPUT); + + if (ret == WAIT_OBJECT_0 + nhandles) + { + /* new input of some other kind */ + BOOL bRet; + while ((bRet = PeekMessage (&msg, NULL, 0, 0, PM_REMOVE)) != 0) + { + TranslateMessage (&msg); + DispatchMessage (&msg); + } + } + else + break; + } + + if (poll_again) + select (0, &rfds, &wfds, &xfds, &tv0); + + /* Place a sentinel at the end of the array. 
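+     The result loop below revisits the descriptors in the order in
+     which their non-socket handles were stored in handle_array, so a
+     handle that does not match handle_array[nhandles] must be a
+     socket; the NULL sentinel keeps that comparison well defined once
+     every stored handle has been consumed.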
*/ + handle_array[nhandles] = NULL; + nhandles = 1; + for (i = 0; i < nfd; i++) + { + int happened; + + if (pfd[i].fd < 0) + continue; + if (!(pfd[i].events & (POLLIN | POLLRDNORM | + POLLOUT | POLLWRNORM | POLLWRBAND))) + continue; + + h = (HANDLE) _get_osfhandle (pfd[i].fd); + if (h != handle_array[nhandles]) + { + /* It's a socket. */ + WSAEnumNetworkEvents ((SOCKET) h, NULL, &ev); + WSAEventSelect ((SOCKET) h, NULL, 0); + + /* If we're lucky, WSAEnumNetworkEvents already provided a way + to distinguish FD_READ and FD_ACCEPT; this saves a recv later. */ + if (FD_ISSET ((SOCKET) h, &rfds) + && !(ev.lNetworkEvents & (FD_READ | FD_ACCEPT))) + ev.lNetworkEvents |= FD_READ | FD_ACCEPT; + if (FD_ISSET ((SOCKET) h, &wfds)) + ev.lNetworkEvents |= FD_WRITE | FD_CONNECT; + if (FD_ISSET ((SOCKET) h, &xfds)) + ev.lNetworkEvents |= FD_OOB; + + happened = win32_compute_revents_socket ((SOCKET) h, pfd[i].events, + ev.lNetworkEvents); + } + else + { + /* Not a socket. */ + int sought = pfd[i].events; + happened = win32_compute_revents (h, &sought); + nhandles++; + } + + if ((pfd[i].revents |= happened) != 0) + rc++; + } + + if (!rc && timeout == INFTIM) + { + SleepEx (1, TRUE); + goto restart; + } + + return rc; +#endif +} diff --git a/compat/poll/poll.h b/compat/poll/poll.h new file mode 100644 index 0000000000..b7aa59d973 --- /dev/null +++ b/compat/poll/poll.h @@ -0,0 +1,53 @@ +/* Header for poll(2) emulation + Contributed by Paolo Bonzini. + + Copyright 2001, 2002, 2003, 2007, 2009, 2010 Free Software Foundation, Inc. + + This file is part of gnulib. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, + Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ + +#ifndef _GL_POLL_H +#define _GL_POLL_H + +/* fake a poll(2) environment */ +#define POLLIN 0x0001 /* any readable data available */ +#define POLLPRI 0x0002 /* OOB/Urgent readable data */ +#define POLLOUT 0x0004 /* file descriptor is writeable */ +#define POLLERR 0x0008 /* some poll error occurred */ +#define POLLHUP 0x0010 /* file descriptor was "hung up" */ +#define POLLNVAL 0x0020 /* requested events "invalid" */ +#define POLLRDNORM 0x0040 +#define POLLRDBAND 0x0080 +#define POLLWRNORM 0x0100 +#define POLLWRBAND 0x0200 + +struct pollfd +{ + int fd; /* which file descriptor to poll */ + short events; /* events we are interested in */ + short revents; /* events found on return */ +}; + +typedef unsigned long nfds_t; + +extern int poll (struct pollfd *pfd, nfds_t nfd, int timeout); + +/* Define INFTIM only if doing so conforms to POSIX. 
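+
+   (A hedged sketch of a caller of this emulation: wait up to two
+   seconds for stdin to become readable.)  */
+
+#if 0	/* illustration only */
+#include <stdio.h>
+#include <poll.h>	/* this emulation, or the system's */
+
+int main(void)
+{
+	struct pollfd pfd;
+	int ready;
+
+	pfd.fd = 0;		/* stdin */
+	pfd.events = POLLIN;
+	pfd.revents = 0;
+
+	ready = poll(&pfd, 1, 2000);	/* timeout in milliseconds */
+	if (ready < 0)
+		perror("poll");
+	else if (!ready)
+		puts("timed out");
+	else if (pfd.revents & POLLIN)
+		puts("stdin is readable");
+	return 0;
+}
+#endif
+
+/*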
*/ +#if !defined (_POSIX_C_SOURCE) && !defined (_XOPEN_SOURCE) +#define INFTIM (-1) +#endif + +#endif /* _GL_POLL_H */ diff --git a/compat/pread.c b/compat/pread.c new file mode 100644 index 0000000000..978cac4ec9 --- /dev/null +++ b/compat/pread.c @@ -0,0 +1,18 @@ +#include "../git-compat-util.h" + +ssize_t git_pread(int fd, void *buf, size_t count, off_t offset) +{ + off_t current_offset; + ssize_t rc; + + current_offset = lseek(fd, 0, SEEK_CUR); + + if (lseek(fd, offset, SEEK_SET) < 0) + return -1; + + rc = read_in_full(fd, buf, count); + + if (current_offset != lseek(fd, current_offset, SEEK_SET)) + return -1; + return rc; +} diff --git a/compat/precompose_utf8.c b/compat/precompose_utf8.c new file mode 100644 index 0000000000..95fe849e42 --- /dev/null +++ b/compat/precompose_utf8.c @@ -0,0 +1,181 @@ +/* + * Converts filenames from decomposed unicode into precomposed unicode. + * Used on MacOS X. + */ + +#define PRECOMPOSE_UNICODE_C + +#include "cache.h" +#include "utf8.h" +#include "precompose_utf8.h" + +typedef char *iconv_ibp; +static const char *repo_encoding = "UTF-8"; +static const char *path_encoding = "UTF-8-MAC"; + +static size_t has_non_ascii(const char *s, size_t maxlen, size_t *strlen_c) +{ + const uint8_t *ptr = (const uint8_t *)s; + size_t strlen_chars = 0; + size_t ret = 0; + + if (!ptr || !*ptr) + return 0; + + while (*ptr && maxlen) { + if (*ptr & 0x80) + ret++; + strlen_chars++; + ptr++; + maxlen--; + } + if (strlen_c) + *strlen_c = strlen_chars; + + return ret; +} + + +void probe_utf8_pathname_composition(char *path, int len) +{ + static const char *auml_nfc = "\xc3\xa4"; + static const char *auml_nfd = "\x61\xcc\x88"; + int output_fd; + if (precomposed_unicode != -1) + return; /* We found it defined in the global config, respect it */ + strcpy(path + len, auml_nfc); + output_fd = open(path, O_CREAT|O_EXCL|O_RDWR, 0600); + if (output_fd >= 0) { + close(output_fd); + strcpy(path + len, auml_nfd); + precomposed_unicode = access(path, R_OK) ? 0 : 1; + git_config_set("core.precomposeunicode", precomposed_unicode ? 
"true" : "false"); + strcpy(path + len, auml_nfc); + if (unlink(path)) + die_errno(_("failed to unlink '%s'"), path); + } +} + + +void precompose_argv(int argc, const char **argv) +{ + int i = 0; + const char *oldarg; + char *newarg; + iconv_t ic_precompose; + + if (precomposed_unicode != 1) + return; + + ic_precompose = iconv_open(repo_encoding, path_encoding); + if (ic_precompose == (iconv_t) -1) + return; + + while (i < argc) { + size_t namelen; + oldarg = argv[i]; + if (has_non_ascii(oldarg, (size_t)-1, &namelen)) { + newarg = reencode_string_iconv(oldarg, namelen, ic_precompose, NULL); + if (newarg) + argv[i] = newarg; + } + i++; + } + iconv_close(ic_precompose); +} + + +PREC_DIR *precompose_utf8_opendir(const char *dirname) +{ + PREC_DIR *prec_dir = xmalloc(sizeof(PREC_DIR)); + prec_dir->dirent_nfc = xmalloc(sizeof(dirent_prec_psx)); + prec_dir->dirent_nfc->max_name_len = sizeof(prec_dir->dirent_nfc->d_name); + + prec_dir->dirp = opendir(dirname); + if (!prec_dir->dirp) { + free(prec_dir->dirent_nfc); + free(prec_dir); + return NULL; + } else { + int ret_errno = errno; + prec_dir->ic_precompose = iconv_open(repo_encoding, path_encoding); + /* if iconv_open() fails, die() in readdir() if needed */ + errno = ret_errno; + } + + return prec_dir; +} + +struct dirent_prec_psx *precompose_utf8_readdir(PREC_DIR *prec_dir) +{ + struct dirent *res; + res = readdir(prec_dir->dirp); + if (res) { + size_t namelenz = strlen(res->d_name) + 1; /* \0 */ + size_t new_maxlen = namelenz; + + int ret_errno = errno; + + if (new_maxlen > prec_dir->dirent_nfc->max_name_len) { + size_t new_len = sizeof(dirent_prec_psx) + new_maxlen - + sizeof(prec_dir->dirent_nfc->d_name); + + prec_dir->dirent_nfc = xrealloc(prec_dir->dirent_nfc, new_len); + prec_dir->dirent_nfc->max_name_len = new_maxlen; + } + + prec_dir->dirent_nfc->d_ino = res->d_ino; + prec_dir->dirent_nfc->d_type = res->d_type; + + if ((precomposed_unicode == 1) && has_non_ascii(res->d_name, (size_t)-1, NULL)) { + if (prec_dir->ic_precompose == (iconv_t)-1) { + die("iconv_open(%s,%s) failed, but needed:\n" + " precomposed unicode is not supported.\n" + " If you want to use decomposed unicode, run\n" + " \"git config core.precomposeunicode false\"\n", + repo_encoding, path_encoding); + } else { + iconv_ibp cp = (iconv_ibp)res->d_name; + size_t inleft = namelenz; + char *outpos = &prec_dir->dirent_nfc->d_name[0]; + size_t outsz = prec_dir->dirent_nfc->max_name_len; + size_t cnt; + errno = 0; + cnt = iconv(prec_dir->ic_precompose, &cp, &inleft, &outpos, &outsz); + if (errno || inleft) { + /* + * iconv() failed and errno could be E2BIG, EILSEQ, EINVAL, EBADF + * MacOS X avoids illegal byte sequemces. + * If they occur on a mounted drive (e.g. 
NFS) it is not worth to + * die() for that, but rather let the user see the original name + */ + namelenz = 0; /* trigger strlcpy */ + } + } + } else + namelenz = 0; + + if (!namelenz) + strlcpy(prec_dir->dirent_nfc->d_name, res->d_name, + prec_dir->dirent_nfc->max_name_len); + + errno = ret_errno; + return prec_dir->dirent_nfc; + } + return NULL; +} + + +int precompose_utf8_closedir(PREC_DIR *prec_dir) +{ + int ret_value; + int ret_errno; + ret_value = closedir(prec_dir->dirp); + ret_errno = errno; + if (prec_dir->ic_precompose != (iconv_t)-1) + iconv_close(prec_dir->ic_precompose); + free(prec_dir->dirent_nfc); + free(prec_dir); + errno = ret_errno; + return ret_value; +} diff --git a/compat/precompose_utf8.h b/compat/precompose_utf8.h new file mode 100644 index 0000000000..3b73585fc5 --- /dev/null +++ b/compat/precompose_utf8.h @@ -0,0 +1,45 @@ +#ifndef PRECOMPOSE_UNICODE_H +#include <sys/stat.h> +#include <sys/types.h> +#include <dirent.h> +#include <iconv.h> + + +typedef struct dirent_prec_psx { + ino_t d_ino; /* Posix */ + size_t max_name_len; /* See below */ + unsigned char d_type; /* available on all systems git runs on */ + + /* + * See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/dirent.h.html + * NAME_MAX + 1 should be enough, but some systems have + * NAME_MAX=255 and strlen(d_name) may return 508 or 510 + * Solution: allocate more when needed, see precompose_utf8_readdir() + */ + char d_name[NAME_MAX+1]; +} dirent_prec_psx; + + +typedef struct { + iconv_t ic_precompose; + DIR *dirp; + struct dirent_prec_psx *dirent_nfc; +} PREC_DIR; + +void precompose_argv(int argc, const char **argv); +void probe_utf8_pathname_composition(char *, int); + +PREC_DIR *precompose_utf8_opendir(const char *dirname); +struct dirent_prec_psx *precompose_utf8_readdir(PREC_DIR *dirp); +int precompose_utf8_closedir(PREC_DIR *dirp); + +#ifndef PRECOMPOSE_UNICODE_C +#define dirent dirent_prec_psx +#define opendir(n) precompose_utf8_opendir(n) +#define readdir(d) precompose_utf8_readdir(d) +#define closedir(d) precompose_utf8_closedir(d) +#define DIR PREC_DIR +#endif /* PRECOMPOSE_UNICODE_C */ + +#define PRECOMPOSE_UNICODE_H +#endif /* PRECOMPOSE_UNICODE_H */ diff --git a/compat/qsort.c b/compat/qsort.c new file mode 100644 index 0000000000..9574d537bd --- /dev/null +++ b/compat/qsort.c @@ -0,0 +1,62 @@ +#include "../git-compat-util.h" + +/* + * A merge sort implementation, simplified from the qsort implementation + * by Mike Haertel, which is a part of the GNU C Library. + */ + +static void msort_with_tmp(void *b, size_t n, size_t s, + int (*cmp)(const void *, const void *), + char *t) +{ + char *tmp; + char *b1, *b2; + size_t n1, n2; + + if (n <= 1) + return; + + n1 = n / 2; + n2 = n - n1; + b1 = b; + b2 = (char *)b + (n1 * s); + + msort_with_tmp(b1, n1, s, cmp, t); + msort_with_tmp(b2, n2, s, cmp, t); + + tmp = t; + + while (n1 > 0 && n2 > 0) { + if (cmp(b1, b2) <= 0) { + memcpy(tmp, b1, s); + tmp += s; + b1 += s; + --n1; + } else { + memcpy(tmp, b2, s); + tmp += s; + b2 += s; + --n2; + } + } + if (n1 > 0) + memcpy(tmp, b1, n1 * s); + memcpy(b, t, (n - n2) * s); +} + +void git_qsort(void *b, size_t n, size_t s, + int (*cmp)(const void *, const void *)) +{ + const size_t size = n * s; + char buf[1024]; + + if (size < sizeof(buf)) { + /* The temporary array fits on the small on-stack buffer. */ + msort_with_tmp(b, n, s, cmp, buf); + } else { + /* It's somewhat large, so malloc it. 
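+		   xmalloc() die()s on allocation failure instead of
+		   returning NULL, so no fallback path is needed here.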
*/ + char *tmp = xmalloc(size); + msort_with_tmp(b, n, s, cmp, tmp); + free(tmp); + } +} diff --git a/compat/regex/regcomp.c b/compat/regex/regcomp.c new file mode 100644 index 0000000000..06f3088708 --- /dev/null +++ b/compat/regex/regcomp.c @@ -0,0 +1,3884 @@ +/* Extended regular expression matching and search library. + Copyright (C) 2002-2007,2009,2010 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Isamu Hasegawa <isamu@yamato.ibm.com>. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301 USA. */ + +static reg_errcode_t re_compile_internal (regex_t *preg, const char * pattern, + size_t length, reg_syntax_t syntax); +static void re_compile_fastmap_iter (regex_t *bufp, + const re_dfastate_t *init_state, + char *fastmap); +static reg_errcode_t init_dfa (re_dfa_t *dfa, size_t pat_len); +#ifdef RE_ENABLE_I18N +static void free_charset (re_charset_t *cset); +#endif /* RE_ENABLE_I18N */ +static void free_workarea_compile (regex_t *preg); +static reg_errcode_t create_initial_state (re_dfa_t *dfa); +#ifdef RE_ENABLE_I18N +static void optimize_utf8 (re_dfa_t *dfa); +#endif +static reg_errcode_t analyze (regex_t *preg); +static reg_errcode_t preorder (bin_tree_t *root, + reg_errcode_t (fn (void *, bin_tree_t *)), + void *extra); +static reg_errcode_t postorder (bin_tree_t *root, + reg_errcode_t (fn (void *, bin_tree_t *)), + void *extra); +static reg_errcode_t optimize_subexps (void *extra, bin_tree_t *node); +static reg_errcode_t lower_subexps (void *extra, bin_tree_t *node); +static bin_tree_t *lower_subexp (reg_errcode_t *err, regex_t *preg, + bin_tree_t *node); +static reg_errcode_t calc_first (void *extra, bin_tree_t *node); +static reg_errcode_t calc_next (void *extra, bin_tree_t *node); +static reg_errcode_t link_nfa_nodes (void *extra, bin_tree_t *node); +static int duplicate_node (re_dfa_t *dfa, int org_idx, unsigned int constraint); +static int search_duplicated_node (const re_dfa_t *dfa, int org_node, + unsigned int constraint); +static reg_errcode_t calc_eclosure (re_dfa_t *dfa); +static reg_errcode_t calc_eclosure_iter (re_node_set *new_set, re_dfa_t *dfa, + int node, int root); +static reg_errcode_t calc_inveclosure (re_dfa_t *dfa); +static int fetch_number (re_string_t *input, re_token_t *token, + reg_syntax_t syntax); +static int peek_token (re_token_t *token, re_string_t *input, + reg_syntax_t syntax) internal_function; +static bin_tree_t *parse (re_string_t *regexp, regex_t *preg, + reg_syntax_t syntax, reg_errcode_t *err); +static bin_tree_t *parse_reg_exp (re_string_t *regexp, regex_t *preg, + re_token_t *token, reg_syntax_t syntax, + int nest, reg_errcode_t *err); +static bin_tree_t *parse_branch (re_string_t *regexp, regex_t *preg, + re_token_t *token, reg_syntax_t syntax, + int nest, reg_errcode_t *err); +static bin_tree_t *parse_expression (re_string_t 
*regexp, regex_t *preg, + re_token_t *token, reg_syntax_t syntax, + int nest, reg_errcode_t *err); +static bin_tree_t *parse_sub_exp (re_string_t *regexp, regex_t *preg, + re_token_t *token, reg_syntax_t syntax, + int nest, reg_errcode_t *err); +static bin_tree_t *parse_dup_op (bin_tree_t *dup_elem, re_string_t *regexp, + re_dfa_t *dfa, re_token_t *token, + reg_syntax_t syntax, reg_errcode_t *err); +static bin_tree_t *parse_bracket_exp (re_string_t *regexp, re_dfa_t *dfa, + re_token_t *token, reg_syntax_t syntax, + reg_errcode_t *err); +static reg_errcode_t parse_bracket_element (bracket_elem_t *elem, + re_string_t *regexp, + re_token_t *token, int token_len, + re_dfa_t *dfa, + reg_syntax_t syntax, + int accept_hyphen); +static reg_errcode_t parse_bracket_symbol (bracket_elem_t *elem, + re_string_t *regexp, + re_token_t *token); +#ifdef RE_ENABLE_I18N +static reg_errcode_t build_equiv_class (bitset_t sbcset, + re_charset_t *mbcset, + int *equiv_class_alloc, + const unsigned char *name); +static reg_errcode_t build_charclass (RE_TRANSLATE_TYPE trans, + bitset_t sbcset, + re_charset_t *mbcset, + int *char_class_alloc, + const char *class_name, + reg_syntax_t syntax); +#else /* not RE_ENABLE_I18N */ +static reg_errcode_t build_equiv_class (bitset_t sbcset, + const unsigned char *name); +static reg_errcode_t build_charclass (RE_TRANSLATE_TYPE trans, + bitset_t sbcset, + const char *class_name, + reg_syntax_t syntax); +#endif /* not RE_ENABLE_I18N */ +static bin_tree_t *build_charclass_op (re_dfa_t *dfa, + RE_TRANSLATE_TYPE trans, + const char *class_name, + const char *extra, + int non_match, reg_errcode_t *err); +static bin_tree_t *create_tree (re_dfa_t *dfa, + bin_tree_t *left, bin_tree_t *right, + re_token_type_t type); +static bin_tree_t *create_token_tree (re_dfa_t *dfa, + bin_tree_t *left, bin_tree_t *right, + const re_token_t *token); +static bin_tree_t *duplicate_tree (const bin_tree_t *src, re_dfa_t *dfa); +static void free_token (re_token_t *node); +static reg_errcode_t free_tree (void *extra, bin_tree_t *node); +static reg_errcode_t mark_opt_subexp (void *extra, bin_tree_t *node); + +/* This table gives an error message for each of the error codes listed + in regex.h. Obviously the order here has to be same as there. + POSIX doesn't require that we do anything for REG_NOERROR, + but why not be nice? 
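+
+   A POSIX caller sees these strings through regerror(3).  A hedged
+   sketch (the exact text may be translated via gettext):  */
+
+#if 0	/* illustration only */
+#include <regex.h>
+#include <stdio.h>
+
+int main(void)
+{
+	regex_t re;
+	int err = regcomp(&re, "(unclosed", REG_EXTENDED);
+
+	if (err) {
+		char msg[256];
+
+		regerror(err, &re, msg, sizeof(msg));
+		fprintf(stderr, "regcomp: %s\n", msg);	/* "Unmatched ( or \(" */
+		return 1;
+	}
+	regfree(&re);
+	return 0;
+}
+#endif
+
+/*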
*/ + +const char __re_error_msgid[] attribute_hidden = + { +#define REG_NOERROR_IDX 0 + gettext_noop ("Success") /* REG_NOERROR */ + "\0" +#define REG_NOMATCH_IDX (REG_NOERROR_IDX + sizeof "Success") + gettext_noop ("No match") /* REG_NOMATCH */ + "\0" +#define REG_BADPAT_IDX (REG_NOMATCH_IDX + sizeof "No match") + gettext_noop ("Invalid regular expression") /* REG_BADPAT */ + "\0" +#define REG_ECOLLATE_IDX (REG_BADPAT_IDX + sizeof "Invalid regular expression") + gettext_noop ("Invalid collation character") /* REG_ECOLLATE */ + "\0" +#define REG_ECTYPE_IDX (REG_ECOLLATE_IDX + sizeof "Invalid collation character") + gettext_noop ("Invalid character class name") /* REG_ECTYPE */ + "\0" +#define REG_EESCAPE_IDX (REG_ECTYPE_IDX + sizeof "Invalid character class name") + gettext_noop ("Trailing backslash") /* REG_EESCAPE */ + "\0" +#define REG_ESUBREG_IDX (REG_EESCAPE_IDX + sizeof "Trailing backslash") + gettext_noop ("Invalid back reference") /* REG_ESUBREG */ + "\0" +#define REG_EBRACK_IDX (REG_ESUBREG_IDX + sizeof "Invalid back reference") + gettext_noop ("Unmatched [ or [^") /* REG_EBRACK */ + "\0" +#define REG_EPAREN_IDX (REG_EBRACK_IDX + sizeof "Unmatched [ or [^") + gettext_noop ("Unmatched ( or \\(") /* REG_EPAREN */ + "\0" +#define REG_EBRACE_IDX (REG_EPAREN_IDX + sizeof "Unmatched ( or \\(") + gettext_noop ("Unmatched \\{") /* REG_EBRACE */ + "\0" +#define REG_BADBR_IDX (REG_EBRACE_IDX + sizeof "Unmatched \\{") + gettext_noop ("Invalid content of \\{\\}") /* REG_BADBR */ + "\0" +#define REG_ERANGE_IDX (REG_BADBR_IDX + sizeof "Invalid content of \\{\\}") + gettext_noop ("Invalid range end") /* REG_ERANGE */ + "\0" +#define REG_ESPACE_IDX (REG_ERANGE_IDX + sizeof "Invalid range end") + gettext_noop ("Memory exhausted") /* REG_ESPACE */ + "\0" +#define REG_BADRPT_IDX (REG_ESPACE_IDX + sizeof "Memory exhausted") + gettext_noop ("Invalid preceding regular expression") /* REG_BADRPT */ + "\0" +#define REG_EEND_IDX (REG_BADRPT_IDX + sizeof "Invalid preceding regular expression") + gettext_noop ("Premature end of regular expression") /* REG_EEND */ + "\0" +#define REG_ESIZE_IDX (REG_EEND_IDX + sizeof "Premature end of regular expression") + gettext_noop ("Regular expression too big") /* REG_ESIZE */ + "\0" +#define REG_ERPAREN_IDX (REG_ESIZE_IDX + sizeof "Regular expression too big") + gettext_noop ("Unmatched ) or \\)") /* REG_ERPAREN */ + }; + +const size_t __re_error_msgid_idx[] attribute_hidden = + { + REG_NOERROR_IDX, + REG_NOMATCH_IDX, + REG_BADPAT_IDX, + REG_ECOLLATE_IDX, + REG_ECTYPE_IDX, + REG_EESCAPE_IDX, + REG_ESUBREG_IDX, + REG_EBRACK_IDX, + REG_EPAREN_IDX, + REG_EBRACE_IDX, + REG_BADBR_IDX, + REG_ERANGE_IDX, + REG_ESPACE_IDX, + REG_BADRPT_IDX, + REG_EEND_IDX, + REG_ESIZE_IDX, + REG_ERPAREN_IDX + }; + +/* Entry points for GNU code. */ + + +#ifdef ZOS_USS + +/* For ZOS USS we must define btowc */ + +wchar_t +btowc (int c) +{ + wchar_t wtmp[2]; + char tmp[2]; + + tmp[0] = c; + tmp[1] = 0; + + mbtowc (wtmp, tmp, 1); + return wtmp[0]; +} +#endif + +/* re_compile_pattern is the GNU regular expression compiler: it + compiles PATTERN (of length LENGTH) and puts the result in BUFP. + Returns 0 if the pattern was valid, otherwise an error string. + + Assumes the `allocated' (and perhaps `buffer') and `translate' fields + are set in BUFP on entry. 
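+
+   From the caller's side, a hedged sketch of this entry point paired
+   with re_search():  */
+
+#if 0	/* illustration only */
+#include <stdio.h>
+#include <string.h>
+#include <regex.h>
+
+int main(void)
+{
+	struct re_pattern_buffer buf;
+	const char *err;
+	int pos;
+
+	re_syntax_options = RE_SYNTAX_POSIX_EXTENDED;
+	memset(&buf, 0, sizeof(buf));
+
+	err = re_compile_pattern("wor[a-z]+", 9, &buf);
+	if (err) {	/* NULL means success */
+		fprintf(stderr, "re_compile_pattern: %s\n", err);
+		return 1;
+	}
+	pos = re_search(&buf, "hello world", 11, 0, 11, NULL);
+	printf("first match at offset %d\n", pos);	/* prints 6 */
+	regfree(&buf);
+	return 0;
+}
+#endif
+
+/*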
*/ + +const char * +re_compile_pattern (const char *pattern, + size_t length, + struct re_pattern_buffer *bufp) +{ + reg_errcode_t ret; + + /* And GNU code determines whether or not to get register information + by passing null for the REGS argument to re_match, etc., not by + setting no_sub, unless RE_NO_SUB is set. */ + bufp->no_sub = !!(re_syntax_options & RE_NO_SUB); + + /* Match anchors at newline. */ + bufp->newline_anchor = 1; + + ret = re_compile_internal (bufp, pattern, length, re_syntax_options); + + if (!ret) + return NULL; + return gettext (__re_error_msgid + __re_error_msgid_idx[(int) ret]); +} +#ifdef _LIBC +weak_alias (__re_compile_pattern, re_compile_pattern) +#endif + +/* Set by `re_set_syntax' to the current regexp syntax to recognize. Can + also be assigned to arbitrarily: each pattern buffer stores its own + syntax, so it can be changed between regex compilations. */ +/* This has no initializer because initialized variables in Emacs + become read-only after dumping. */ +reg_syntax_t re_syntax_options; + + +/* Specify the precise syntax of regexps for compilation. This provides + for compatibility for various utilities which historically have + different, incompatible syntaxes. + + The argument SYNTAX is a bit mask comprised of the various bits + defined in regex.h. We return the old syntax. */ + +reg_syntax_t +re_set_syntax (reg_syntax_t syntax) +{ + reg_syntax_t ret = re_syntax_options; + + re_syntax_options = syntax; + return ret; +} +#ifdef _LIBC +weak_alias (__re_set_syntax, re_set_syntax) +#endif + +int +re_compile_fastmap (struct re_pattern_buffer *bufp) +{ + re_dfa_t *dfa = (re_dfa_t *) bufp->buffer; + char *fastmap = bufp->fastmap; + + memset (fastmap, '\0', sizeof (char) * SBC_MAX); + re_compile_fastmap_iter (bufp, dfa->init_state, fastmap); + if (dfa->init_state != dfa->init_state_word) + re_compile_fastmap_iter (bufp, dfa->init_state_word, fastmap); + if (dfa->init_state != dfa->init_state_nl) + re_compile_fastmap_iter (bufp, dfa->init_state_nl, fastmap); + if (dfa->init_state != dfa->init_state_begbuf) + re_compile_fastmap_iter (bufp, dfa->init_state_begbuf, fastmap); + bufp->fastmap_accurate = 1; + return 0; +} +#ifdef _LIBC +weak_alias (__re_compile_fastmap, re_compile_fastmap) +#endif + +static inline void +__attribute ((always_inline)) +re_set_fastmap (char *fastmap, int icase, int ch) +{ + fastmap[ch] = 1; + if (icase) + fastmap[tolower (ch)] = 1; +} + +/* Helper function for re_compile_fastmap. + Compile fastmap for the initial_state INIT_STATE. 
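+
+   The fastmap records every byte that can begin a match, letting the
+   search routines skip all other bytes.  A hedged caller-side sketch:  */
+
+#if 0	/* illustration only */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <regex.h>
+
+int main(void)
+{
+	struct re_pattern_buffer buf;
+	int ch;
+
+	re_syntax_options = RE_SYNTAX_POSIX_EXTENDED;
+	memset(&buf, 0, sizeof(buf));
+	if (re_compile_pattern("foo|bar", 7, &buf))
+		return 1;
+
+	buf.fastmap = malloc(256);	/* caller supplies the storage */
+	re_compile_fastmap(&buf);
+
+	for (ch = 0; ch < 256; ch++)
+		if (buf.fastmap[ch])
+			printf("'%c' can start a match\n", ch);	/* 'b' and 'f' */
+	regfree(&buf);	/* also frees the fastmap */
+	return 0;
+}
+#endif
+
+/*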
*/ + +static void +re_compile_fastmap_iter (regex_t *bufp, const re_dfastate_t *init_state, + char *fastmap) +{ + volatile re_dfa_t *dfa = (re_dfa_t *) bufp->buffer; + int node_cnt; + int icase = (dfa->mb_cur_max == 1 && (bufp->syntax & RE_ICASE)); + for (node_cnt = 0; node_cnt < init_state->nodes.nelem; ++node_cnt) + { + int node = init_state->nodes.elems[node_cnt]; + re_token_type_t type = dfa->nodes[node].type; + + if (type == CHARACTER) + { + re_set_fastmap (fastmap, icase, dfa->nodes[node].opr.c); +#ifdef RE_ENABLE_I18N + if ((bufp->syntax & RE_ICASE) && dfa->mb_cur_max > 1) + { + unsigned char *buf = re_malloc (unsigned char, dfa->mb_cur_max), *p; + wchar_t wc; + mbstate_t state; + + p = buf; + *p++ = dfa->nodes[node].opr.c; + while (++node < dfa->nodes_len + && dfa->nodes[node].type == CHARACTER + && dfa->nodes[node].mb_partial) + *p++ = dfa->nodes[node].opr.c; + memset (&state, '\0', sizeof (state)); + if (__mbrtowc (&wc, (const char *) buf, p - buf, + &state) == p - buf + && (__wcrtomb ((char *) buf, towlower (wc), &state) + != (size_t) -1)) + re_set_fastmap (fastmap, 0, buf[0]); + re_free (buf); + } +#endif + } + else if (type == SIMPLE_BRACKET) + { + int i, ch; + for (i = 0, ch = 0; i < BITSET_WORDS; ++i) + { + int j; + bitset_word_t w = dfa->nodes[node].opr.sbcset[i]; + for (j = 0; j < BITSET_WORD_BITS; ++j, ++ch) + if (w & ((bitset_word_t) 1 << j)) + re_set_fastmap (fastmap, icase, ch); + } + } +#ifdef RE_ENABLE_I18N + else if (type == COMPLEX_BRACKET) + { + re_charset_t *cset = dfa->nodes[node].opr.mbcset; + int i; + +# ifdef _LIBC + /* See if we have to try all bytes which start multiple collation + elements. + e.g. In da_DK, we want to catch 'a' since "aa" is a valid + collation element, and don't catch 'b' since 'b' is + the only collation element which starts from 'b' (and + it is caught by SIMPLE_BRACKET). */ + if (_NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES) != 0 + && (cset->ncoll_syms || cset->nranges)) + { + const int32_t *table = (const int32_t *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEMB); + for (i = 0; i < SBC_MAX; ++i) + if (table[i] < 0) + re_set_fastmap (fastmap, icase, i); + } +# endif /* _LIBC */ + + /* See if we have to start the match at all multibyte characters, + i.e. where we would not find an invalid sequence. This only + applies to multibyte character sets; for single byte character + sets, the SIMPLE_BRACKET again suffices. */ + if (dfa->mb_cur_max > 1 + && (cset->nchar_classes || cset->non_match || cset->nranges +# ifdef _LIBC + || cset->nequiv_classes +# endif /* _LIBC */ + )) + { + unsigned char c = 0; + do + { + mbstate_t mbs; + memset (&mbs, 0, sizeof (mbs)); + if (__mbrtowc (NULL, (char *) &c, 1, &mbs) == (size_t) -2) + re_set_fastmap (fastmap, false, (int) c); + } + while (++c != 0); + } + + else + { + /* ... Else catch all bytes which can start the mbchars. 
*/ + for (i = 0; i < cset->nmbchars; ++i) + { + char buf[256]; + mbstate_t state; + memset (&state, '\0', sizeof (state)); + if (__wcrtomb (buf, cset->mbchars[i], &state) != (size_t) -1) + re_set_fastmap (fastmap, icase, *(unsigned char *) buf); + if ((bufp->syntax & RE_ICASE) && dfa->mb_cur_max > 1) + { + if (__wcrtomb (buf, towlower (cset->mbchars[i]), &state) + != (size_t) -1) + re_set_fastmap (fastmap, false, *(unsigned char *) buf); + } + } + } + } +#endif /* RE_ENABLE_I18N */ + else if (type == OP_PERIOD +#ifdef RE_ENABLE_I18N + || type == OP_UTF8_PERIOD +#endif /* RE_ENABLE_I18N */ + || type == END_OF_RE) + { + memset (fastmap, '\1', sizeof (char) * SBC_MAX); + if (type == END_OF_RE) + bufp->can_be_null = 1; + return; + } + } +} + +/* Entry point for POSIX code. */ +/* regcomp takes a regular expression as a string and compiles it. + + PREG is a regex_t *. We do not expect any fields to be initialized, + since POSIX says we shouldn't. Thus, we set + + `buffer' to the compiled pattern; + `used' to the length of the compiled pattern; + `syntax' to RE_SYNTAX_POSIX_EXTENDED if the + REG_EXTENDED bit in CFLAGS is set; otherwise, to + RE_SYNTAX_POSIX_BASIC; + `newline_anchor' to REG_NEWLINE being set in CFLAGS; + `fastmap' to an allocated space for the fastmap; + `fastmap_accurate' to zero; + `re_nsub' to the number of subexpressions in PATTERN. + + PATTERN is the address of the pattern string. + + CFLAGS is a series of bits which affect compilation. + + If REG_EXTENDED is set, we use POSIX extended syntax; otherwise, we + use POSIX basic syntax. + + If REG_NEWLINE is set, then . and [^...] don't match newline. + Also, regexec will try a match beginning after every newline. + + If REG_ICASE is set, then we considers upper- and lowercase + versions of letters to be equivalent when matching. + + If REG_NOSUB is set, then when PREG is passed to regexec, that + routine will report only success or failure, and nothing about the + registers. + + It returns 0 if it succeeds, nonzero if it doesn't. (See regex.h for + the return codes and their meanings.) */ + +int +regcomp (regex_t *__restrict preg, + const char *__restrict pattern, + int cflags) +{ + reg_errcode_t ret; + reg_syntax_t syntax = ((cflags & REG_EXTENDED) ? RE_SYNTAX_POSIX_EXTENDED + : RE_SYNTAX_POSIX_BASIC); + + preg->buffer = NULL; + preg->allocated = 0; + preg->used = 0; + + /* Try to allocate space for the fastmap. */ + preg->fastmap = re_malloc (char, SBC_MAX); + if (BE (preg->fastmap == NULL, 0)) + return REG_ESPACE; + + syntax |= (cflags & REG_ICASE) ? RE_ICASE : 0; + + /* If REG_NEWLINE is set, newlines are treated differently. */ + if (cflags & REG_NEWLINE) + { /* REG_NEWLINE implies neither . nor [^...] match newline. */ + syntax &= ~RE_DOT_NEWLINE; + syntax |= RE_HAT_LISTS_NOT_NEWLINE; + /* It also changes the matching behavior. */ + preg->newline_anchor = 1; + } + else + preg->newline_anchor = 0; + preg->no_sub = !!(cflags & REG_NOSUB); + preg->translate = NULL; + + ret = re_compile_internal (preg, pattern, strlen (pattern), syntax); + + /* POSIX doesn't distinguish between an unmatched open-group and an + unmatched close-group: both are REG_EPAREN. */ + if (ret == REG_ERPAREN) + ret = REG_EPAREN; + + /* We have already checked preg->fastmap != NULL. */ + if (BE (ret == REG_NOERROR, 1)) + /* Compute the fastmap now, since regexec cannot modify the pattern + buffer. This function never fails in this implementation. */ + (void) re_compile_fastmap (preg); + else + { + /* Some error occurred while compiling the expression. 
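+         Free the fastmap here as well: POSIX callers are not expected
+         to call regfree() after a failed regcomp().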
*/ + re_free (preg->fastmap); + preg->fastmap = NULL; + } + + return (int) ret; +} +#ifdef _LIBC +weak_alias (__regcomp, regcomp) +#endif + +/* Returns a message corresponding to an error code, ERRCODE, returned + from either regcomp or regexec. We don't use PREG here. */ + +size_t +regerror(int errcode, const regex_t *__restrict preg, + char *__restrict errbuf, size_t errbuf_size) +{ + const char *msg; + size_t msg_size; + + if (BE (errcode < 0 + || errcode >= (int) (sizeof (__re_error_msgid_idx) + / sizeof (__re_error_msgid_idx[0])), 0)) + /* Only error codes returned by the rest of the code should be passed + to this routine. If we are given anything else, or if other regex + code generates an invalid error code, then the program has a bug. + Dump core so we can fix it. */ + abort (); + + msg = gettext (__re_error_msgid + __re_error_msgid_idx[errcode]); + + msg_size = strlen (msg) + 1; /* Includes the null. */ + + if (BE (errbuf_size != 0, 1)) + { + if (BE (msg_size > errbuf_size, 0)) + { + memcpy (errbuf, msg, errbuf_size - 1); + errbuf[errbuf_size - 1] = 0; + } + else + memcpy (errbuf, msg, msg_size); + } + + return msg_size; +} +#ifdef _LIBC +weak_alias (__regerror, regerror) +#endif + + +#ifdef RE_ENABLE_I18N +/* This static array is used for the map to single-byte characters when + UTF-8 is used. Otherwise we would allocate memory just to initialize + it the same all the time. UTF-8 is the preferred encoding so this is + a worthwhile optimization. */ +#if __GNUC__ >= 3 +static const bitset_t utf8_sb_map = { + /* Set the first 128 bits. */ + [0 ... 0x80 / BITSET_WORD_BITS - 1] = BITSET_WORD_MAX +}; +#else /* ! (__GNUC__ >= 3) */ +static bitset_t utf8_sb_map; +#endif /* __GNUC__ >= 3 */ +#endif /* RE_ENABLE_I18N */ + + +static void +free_dfa_content (re_dfa_t *dfa) +{ + int i, j; + + if (dfa->nodes) + for (i = 0; i < dfa->nodes_len; ++i) + free_token (dfa->nodes + i); + re_free (dfa->nexts); + for (i = 0; i < dfa->nodes_len; ++i) + { + if (dfa->eclosures != NULL) + re_node_set_free (dfa->eclosures + i); + if (dfa->inveclosures != NULL) + re_node_set_free (dfa->inveclosures + i); + if (dfa->edests != NULL) + re_node_set_free (dfa->edests + i); + } + re_free (dfa->edests); + re_free (dfa->eclosures); + re_free (dfa->inveclosures); + re_free (dfa->nodes); + + if (dfa->state_table) + for (i = 0; i <= dfa->state_hash_mask; ++i) + { + struct re_state_table_entry *entry = dfa->state_table + i; + for (j = 0; j < entry->num; ++j) + { + re_dfastate_t *state = entry->array[j]; + free_state (state); + } + re_free (entry->array); + } + re_free (dfa->state_table); +#ifdef RE_ENABLE_I18N + if (dfa->sb_char != utf8_sb_map) + re_free (dfa->sb_char); +#endif + re_free (dfa->subexp_map); +#ifdef DEBUG + re_free (dfa->re_str); +#endif + + re_free (dfa); +} + + +/* Free dynamically allocated space used by PREG. */ + +void +regfree (regex_t *preg) +{ + re_dfa_t *dfa = (re_dfa_t *) preg->buffer; + if (BE (dfa != NULL, 1)) + free_dfa_content (dfa); + preg->buffer = NULL; + preg->allocated = 0; + + re_free (preg->fastmap); + preg->fastmap = NULL; + + re_free (preg->translate); + preg->translate = NULL; +} +#ifdef _LIBC +weak_alias (__regfree, regfree) +#endif + +/* Entry points compatible with 4.2 BSD regex library. We don't define + them unless specifically requested. */ + +#if defined _REGEX_RE_COMP || defined _LIBC + +/* BSD has one and only one pattern buffer. 
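+
+   Since that buffer is global, the interface is not reentrant.  A
+   hedged sketch of the 4.2BSD-style usage; re_exec() itself lives in
+   the matcher, not in this file.  */
+
+#if 0	/* illustration only */
+#include <stdio.h>
+
+extern char *re_comp(const char *);	/* NULL on success */
+extern int re_exec(const char *);	/* 1 on match, 0 otherwise */
+
+int main(void)
+{
+	char *err = re_comp("^ab");
+
+	if (err) {
+		fprintf(stderr, "re_comp: %s\n", err);
+		return 2;
+	}
+	printf("%d\n", re_exec("abc"));		/* 1 */
+	printf("%d\n", re_exec("xabc"));	/* 0: the pattern is anchored */
+	return 0;
+}
+#endif
+
+/*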
*/ +static struct re_pattern_buffer re_comp_buf; + +char * +# ifdef _LIBC +/* Make these definitions weak in libc, so POSIX programs can redefine + these names if they don't use our functions, and still use + regcomp/regexec above without link errors. */ +weak_function +# endif +re_comp (s) + const char *s; +{ + reg_errcode_t ret; + char *fastmap; + + if (!s) + { + if (!re_comp_buf.buffer) + return gettext ("No previous regular expression"); + return 0; + } + + if (re_comp_buf.buffer) + { + fastmap = re_comp_buf.fastmap; + re_comp_buf.fastmap = NULL; + __regfree (&re_comp_buf); + memset (&re_comp_buf, '\0', sizeof (re_comp_buf)); + re_comp_buf.fastmap = fastmap; + } + + if (re_comp_buf.fastmap == NULL) + { + re_comp_buf.fastmap = (char *) malloc (SBC_MAX); + if (re_comp_buf.fastmap == NULL) + return (char *) gettext (__re_error_msgid + + __re_error_msgid_idx[(int) REG_ESPACE]); + } + + /* Since `re_exec' always passes NULL for the `regs' argument, we + don't need to initialize the pattern buffer fields which affect it. */ + + /* Match anchors at newlines. */ + re_comp_buf.newline_anchor = 1; + + ret = re_compile_internal (&re_comp_buf, s, strlen (s), re_syntax_options); + + if (!ret) + return NULL; + + /* Yes, we're discarding `const' here if !HAVE_LIBINTL. */ + return (char *) gettext (__re_error_msgid + __re_error_msgid_idx[(int) ret]); +} + +#ifdef _LIBC +libc_freeres_fn (free_mem) +{ + __regfree (&re_comp_buf); +} +#endif + +#endif /* _REGEX_RE_COMP */ + +/* Internal entry point. + Compile the regular expression PATTERN, whose length is LENGTH. + SYNTAX indicate regular expression's syntax. */ + +static reg_errcode_t +re_compile_internal (regex_t *preg, const char * pattern, size_t length, + reg_syntax_t syntax) +{ + reg_errcode_t err = REG_NOERROR; + re_dfa_t *dfa; + re_string_t regexp; + + /* Initialize the pattern buffer. */ + preg->fastmap_accurate = 0; + preg->syntax = syntax; + preg->not_bol = preg->not_eol = 0; + preg->used = 0; + preg->re_nsub = 0; + preg->can_be_null = 0; + preg->regs_allocated = REGS_UNALLOCATED; + + /* Initialize the dfa. */ + dfa = (re_dfa_t *) preg->buffer; + if (BE (preg->allocated < sizeof (re_dfa_t), 0)) + { + /* If zero allocated, but buffer is non-null, try to realloc + enough space. This loses if buffer's address is bogus, but + that is the user's responsibility. If ->buffer is NULL this + is a simple allocation. */ + dfa = re_realloc (preg->buffer, re_dfa_t, 1); + if (dfa == NULL) + return REG_ESPACE; + preg->allocated = sizeof (re_dfa_t); + preg->buffer = (unsigned char *) dfa; + } + preg->used = sizeof (re_dfa_t); + + err = init_dfa (dfa, length); + if (BE (err != REG_NOERROR, 0)) + { + free_dfa_content (dfa); + preg->buffer = NULL; + preg->allocated = 0; + return err; + } +#ifdef DEBUG + /* Note: length+1 will not overflow since it is checked in init_dfa. */ + dfa->re_str = re_malloc (char, length + 1); + strncpy (dfa->re_str, pattern, length + 1); +#endif + + __libc_lock_init (dfa->lock); + + err = re_string_construct (®exp, pattern, length, preg->translate, + syntax & RE_ICASE, dfa); + if (BE (err != REG_NOERROR, 0)) + { + re_compile_internal_free_return: + free_workarea_compile (preg); + re_string_destruct (®exp); + free_dfa_content (dfa); + preg->buffer = NULL; + preg->allocated = 0; + return err; + } + + /* Parse the regular expression, and build a structure tree. 
*/
+  preg->re_nsub = 0;
+  dfa->str_tree = parse (&regexp, preg, syntax, &err);
+  if (BE (dfa->str_tree == NULL, 0))
+    goto re_compile_internal_free_return;
+
+  /* Analyze the tree and create the nfa.  */
+  err = analyze (preg);
+  if (BE (err != REG_NOERROR, 0))
+    goto re_compile_internal_free_return;
+
+#ifdef RE_ENABLE_I18N
+  /* If possible, do searching in single byte encoding to speed things up.  */
+  if (dfa->is_utf8 && !(syntax & RE_ICASE) && preg->translate == NULL)
+    optimize_utf8 (dfa);
+#endif
+
+  /* Then create the initial state of the dfa.  */
+  err = create_initial_state (dfa);
+
+  /* Release work areas.  */
+  free_workarea_compile (preg);
+  re_string_destruct (&regexp);
+
+  if (BE (err != REG_NOERROR, 0))
+    {
+      free_dfa_content (dfa);
+      preg->buffer = NULL;
+      preg->allocated = 0;
+    }
+
+  return err;
+}
+
+/* Initialize DFA.  We use the length of the regular expression PAT_LEN
+   as the initial length of some arrays.  */
+
+static reg_errcode_t
+init_dfa (re_dfa_t *dfa, size_t pat_len)
+{
+  unsigned int table_size;
+#ifndef _LIBC
+  char *codeset_name;
+#endif
+
+  memset (dfa, '\0', sizeof (re_dfa_t));
+
+  /* Force allocation of str_tree_storage the first time.  */
+  dfa->str_tree_storage_idx = BIN_TREE_STORAGE_SIZE;
+
+  /* Avoid overflows.  */
+  if (pat_len == SIZE_MAX)
+    return REG_ESPACE;
+
+  dfa->nodes_alloc = pat_len + 1;
+  dfa->nodes = re_malloc (re_token_t, dfa->nodes_alloc);
+
+  /* table_size = 2 ^ ceil(log pat_len) */
+  for (table_size = 1; ; table_size <<= 1)
+    if (table_size > pat_len)
+      break;
+
+  dfa->state_table = calloc (sizeof (struct re_state_table_entry), table_size);
+  dfa->state_hash_mask = table_size - 1;
+
+  dfa->mb_cur_max = MB_CUR_MAX;
+#ifdef _LIBC
+  if (dfa->mb_cur_max == 6
+      && strcmp (_NL_CURRENT (LC_CTYPE, _NL_CTYPE_CODESET_NAME), "UTF-8") == 0)
+    dfa->is_utf8 = 1;
+  dfa->map_notascii = (_NL_CURRENT_WORD (LC_CTYPE, _NL_CTYPE_MAP_TO_NONASCII)
+		       != 0);
+#else
+# ifdef HAVE_LANGINFO_CODESET
+  codeset_name = nl_langinfo (CODESET);
+# else
+  codeset_name = getenv ("LC_ALL");
+  if (codeset_name == NULL || codeset_name[0] == '\0')
+    codeset_name = getenv ("LC_CTYPE");
+  if (codeset_name == NULL || codeset_name[0] == '\0')
+    codeset_name = getenv ("LANG");
+  if (codeset_name == NULL)
+    codeset_name = "";
+  else if (strchr (codeset_name, '.') != NULL)
+    codeset_name = strchr (codeset_name, '.') + 1;
+# endif
+
+  /* strcasecmp isn't a standard interface; brute-force the check.  */
+#if 0
+  if (strcasecmp (codeset_name, "UTF-8") == 0
+      || strcasecmp (codeset_name, "UTF8") == 0)
+    dfa->is_utf8 = 1;
+#else
+  if ( (codeset_name[0] == 'U' || codeset_name[0] == 'u')
+      && (codeset_name[1] == 'T' || codeset_name[1] == 't')
+      && (codeset_name[2] == 'F' || codeset_name[2] == 'f')
+      && (codeset_name[3] == '-'
+	  ? codeset_name[4] == '8' && codeset_name[5] == '\0'
+	  : codeset_name[3] == '8' && codeset_name[4] == '\0'))
+    dfa->is_utf8 = 1;
+#endif
+
+  /* We check exhaustively in the loop below if this charset is a
+     superset of ASCII.  */
+  dfa->map_notascii = 0;
+#endif
+
+#ifdef RE_ENABLE_I18N
+  if (dfa->mb_cur_max > 1)
+    {
+      if (dfa->is_utf8)
+	{
+#if !defined(__GNUC__) || __GNUC__ < 3
+	  static short utf8_sb_map_inited = 0;
+
+	  if (! utf8_sb_map_inited)
+	    {
+	      int i;
+
+	      utf8_sb_map_inited = 1;	/* build the map only once */
+	      for (i = 0; i <= 0x80 / BITSET_WORD_BITS - 1; i++)
+		utf8_sb_map[i] = BITSET_WORD_MAX;
+	    }
+#endif
+	  dfa->sb_char = (re_bitset_ptr_t) utf8_sb_map;
+	}
+      else
+	{
+	  int i, j, ch;
+
+	  dfa->sb_char = (re_bitset_ptr_t) calloc (sizeof (bitset_t), 1);
+	  if (BE (dfa->sb_char == NULL, 0))
+	    return REG_ESPACE;
+
+	  /* Set the bits corresponding to single byte chars.  */
+	  for (i = 0, ch = 0; i < BITSET_WORDS; ++i)
+	    for (j = 0; j < BITSET_WORD_BITS; ++j, ++ch)
+	      {
+		wint_t wch = __btowc (ch);
+		if (wch != WEOF)
+		  dfa->sb_char[i] |= (bitset_word_t) 1 << j;
+# ifndef _LIBC
+		if (isascii (ch) && wch != ch)
+		  dfa->map_notascii = 1;
+# endif
+	      }
+	}
+    }
+#endif
+
+  if (BE (dfa->nodes == NULL || dfa->state_table == NULL, 0))
+    return REG_ESPACE;
+  return REG_NOERROR;
+}
+
+/* Initialize the WORD_CHAR table, which indicates which characters
+   count as "word" characters, i.e. the characters recognized by word
+   operators like "\<", "\>", etc.  */
+
+static void
+internal_function
+init_word_char (re_dfa_t *dfa)
+{
+  int i, j, ch;
+  dfa->word_ops_used = 1;
+  for (i = 0, ch = 0; i < BITSET_WORDS; ++i)
+    for (j = 0; j < BITSET_WORD_BITS; ++j, ++ch)
+      if (isalnum (ch) || ch == '_')
+	dfa->word_char[i] |= (bitset_word_t) 1 << j;
+}
+
+/* Free the work areas which are only used while compiling.  */
+
+static void
+free_workarea_compile (regex_t *preg)
+{
+  re_dfa_t *dfa = (re_dfa_t *) preg->buffer;
+  bin_tree_storage_t *storage, *next;
+  for (storage = dfa->str_tree_storage; storage; storage = next)
+    {
+      next = storage->next;
+      re_free (storage);
+    }
+  dfa->str_tree_storage = NULL;
+  dfa->str_tree_storage_idx = BIN_TREE_STORAGE_SIZE;
+  dfa->str_tree = NULL;
+  re_free (dfa->org_indices);
+  dfa->org_indices = NULL;
+}
+
+/* Create initial states for all contexts.  */
+
+static reg_errcode_t
+create_initial_state (re_dfa_t *dfa)
+{
+  int first, i;
+  reg_errcode_t err;
+  re_node_set init_nodes;
+
+  /* Initial states have the epsilon closure of the node which is
+     the first node of the regular expression.  */
+  first = dfa->str_tree->first->node_idx;
+  dfa->init_node = first;
+  err = re_node_set_init_copy (&init_nodes, dfa->eclosures + first);
+  if (BE (err != REG_NOERROR, 0))
+    return err;
+
+  /* The back-references which are in initial states can epsilon transit,
+     since in this case all of the subexpressions can be null.
+     Then we add epsilon closures of the nodes which are the next nodes of
+     the back-references.  */
+  if (dfa->nbackref > 0)
+    for (i = 0; i < init_nodes.nelem; ++i)
+      {
+	int node_idx = init_nodes.elems[i];
+	re_token_type_t type = dfa->nodes[node_idx].type;
+
+	int clexp_idx;
+	if (type != OP_BACK_REF)
+	  continue;
+	for (clexp_idx = 0; clexp_idx < init_nodes.nelem; ++clexp_idx)
+	  {
+	    re_token_t *clexp_node;
+	    clexp_node = dfa->nodes + init_nodes.elems[clexp_idx];
+	    if (clexp_node->type == OP_CLOSE_SUBEXP
+		&& clexp_node->opr.idx == dfa->nodes[node_idx].opr.idx)
+	      break;
+	  }
+	if (clexp_idx == init_nodes.nelem)
+	  continue;
+
+	if (type == OP_BACK_REF)
+	  {
+	    int dest_idx = dfa->edests[node_idx].elems[0];
+	    if (!re_node_set_contains (&init_nodes, dest_idx))
+	      {
+		reg_errcode_t err = re_node_set_merge (&init_nodes,
+						       dfa->eclosures
+						       + dest_idx);
+		if (err != REG_NOERROR)
+		  return err;
+		i = 0;
+	      }
+	  }
+      }
+
+  /* It must be the first time to invoke acquire_state.  */
+  dfa->init_state = re_acquire_state_context (&err, dfa, &init_nodes, 0);
+  /* Rather than checking ERR directly, we check below that the initial
+     state was actually created; it must not be NULL.
*/ + if (BE (dfa->init_state == NULL, 0)) + return err; + if (dfa->init_state->has_constraint) + { + dfa->init_state_word = re_acquire_state_context (&err, dfa, &init_nodes, + CONTEXT_WORD); + dfa->init_state_nl = re_acquire_state_context (&err, dfa, &init_nodes, + CONTEXT_NEWLINE); + dfa->init_state_begbuf = re_acquire_state_context (&err, dfa, + &init_nodes, + CONTEXT_NEWLINE + | CONTEXT_BEGBUF); + if (BE (dfa->init_state_word == NULL || dfa->init_state_nl == NULL + || dfa->init_state_begbuf == NULL, 0)) + return err; + } + else + dfa->init_state_word = dfa->init_state_nl + = dfa->init_state_begbuf = dfa->init_state; + + re_node_set_free (&init_nodes); + return REG_NOERROR; +} + +#ifdef RE_ENABLE_I18N +/* If it is possible to do searching in single byte encoding instead of UTF-8 + to speed things up, set dfa->mb_cur_max to 1, clear is_utf8 and change + DFA nodes where needed. */ + +static void +optimize_utf8 (re_dfa_t *dfa) +{ + int node, i, mb_chars = 0, has_period = 0; + + for (node = 0; node < dfa->nodes_len; ++node) + switch (dfa->nodes[node].type) + { + case CHARACTER: + if (dfa->nodes[node].opr.c >= 0x80) + mb_chars = 1; + break; + case ANCHOR: + switch (dfa->nodes[node].opr.ctx_type) + { + case LINE_FIRST: + case LINE_LAST: + case BUF_FIRST: + case BUF_LAST: + break; + default: + /* Word anchors etc. cannot be handled. It's okay to test + opr.ctx_type since constraints (for all DFA nodes) are + created by ORing one or more opr.ctx_type values. */ + return; + } + break; + case OP_PERIOD: + has_period = 1; + break; + case OP_BACK_REF: + case OP_ALT: + case END_OF_RE: + case OP_DUP_ASTERISK: + case OP_OPEN_SUBEXP: + case OP_CLOSE_SUBEXP: + break; + case COMPLEX_BRACKET: + return; + case SIMPLE_BRACKET: + /* Just double check. The non-ASCII range starts at 0x80. */ + assert (0x80 % BITSET_WORD_BITS == 0); + for (i = 0x80 / BITSET_WORD_BITS; i < BITSET_WORDS; ++i) + if (dfa->nodes[node].opr.sbcset[i]) + return; + break; + default: + abort (); + } + + if (mb_chars || has_period) + for (node = 0; node < dfa->nodes_len; ++node) + { + if (dfa->nodes[node].type == CHARACTER + && dfa->nodes[node].opr.c >= 0x80) + dfa->nodes[node].mb_partial = 0; + else if (dfa->nodes[node].type == OP_PERIOD) + dfa->nodes[node].type = OP_UTF8_PERIOD; + } + + /* The search can be in single byte locale. */ + dfa->mb_cur_max = 1; + dfa->is_utf8 = 0; + dfa->has_mb_node = dfa->nbackref > 0 || has_period; +} +#endif + +/* Analyze the structure tree, and calculate "first", "next", "edest", + "eclosure", and "inveclosure". */ + +static reg_errcode_t +analyze (regex_t *preg) +{ + re_dfa_t *dfa = (re_dfa_t *) preg->buffer; + reg_errcode_t ret; + + /* Allocate arrays. 
*/ + dfa->nexts = re_malloc (int, dfa->nodes_alloc); + dfa->org_indices = re_malloc (int, dfa->nodes_alloc); + dfa->edests = re_malloc (re_node_set, dfa->nodes_alloc); + dfa->eclosures = re_malloc (re_node_set, dfa->nodes_alloc); + if (BE (dfa->nexts == NULL || dfa->org_indices == NULL || dfa->edests == NULL + || dfa->eclosures == NULL, 0)) + return REG_ESPACE; + + dfa->subexp_map = re_malloc (int, preg->re_nsub); + if (dfa->subexp_map != NULL) + { + int i; + for (i = 0; i < preg->re_nsub; i++) + dfa->subexp_map[i] = i; + preorder (dfa->str_tree, optimize_subexps, dfa); + for (i = 0; i < preg->re_nsub; i++) + if (dfa->subexp_map[i] != i) + break; + if (i == preg->re_nsub) + { + free (dfa->subexp_map); + dfa->subexp_map = NULL; + } + } + + ret = postorder (dfa->str_tree, lower_subexps, preg); + if (BE (ret != REG_NOERROR, 0)) + return ret; + ret = postorder (dfa->str_tree, calc_first, dfa); + if (BE (ret != REG_NOERROR, 0)) + return ret; + preorder (dfa->str_tree, calc_next, dfa); + ret = preorder (dfa->str_tree, link_nfa_nodes, dfa); + if (BE (ret != REG_NOERROR, 0)) + return ret; + ret = calc_eclosure (dfa); + if (BE (ret != REG_NOERROR, 0)) + return ret; + + /* We only need this during the prune_impossible_nodes pass in regexec.c; + skip it if p_i_n will not run, as calc_inveclosure can be quadratic. */ + if ((!preg->no_sub && preg->re_nsub > 0 && dfa->has_plural_match) + || dfa->nbackref) + { + dfa->inveclosures = re_malloc (re_node_set, dfa->nodes_len); + if (BE (dfa->inveclosures == NULL, 0)) + return REG_ESPACE; + ret = calc_inveclosure (dfa); + } + + return ret; +} + +/* Our parse trees are very unbalanced, so we cannot use a stack to + implement parse tree visits. Instead, we use parent pointers and + some hairy code in these two functions. */ +static reg_errcode_t +postorder (bin_tree_t *root, reg_errcode_t (fn (void *, bin_tree_t *)), + void *extra) +{ + bin_tree_t *node, *prev; + + for (node = root; ; ) + { + /* Descend down the tree, preferably to the left (or to the right + if that's the only child). */ + while (node->left || node->right) + if (node->left) + node = node->left; + else + node = node->right; + + do + { + reg_errcode_t err = fn (extra, node); + if (BE (err != REG_NOERROR, 0)) + return err; + if (node->parent == NULL) + return REG_NOERROR; + prev = node; + node = node->parent; + } + /* Go up while we have a node that is reached from the right. */ + while (node->right == prev || node->right == NULL); + node = node->right; + } +} + +static reg_errcode_t +preorder (bin_tree_t *root, reg_errcode_t (fn (void *, bin_tree_t *)), + void *extra) +{ + bin_tree_t *node; + + for (node = root; ; ) + { + reg_errcode_t err = fn (extra, node); + if (BE (err != REG_NOERROR, 0)) + return err; + + /* Go to the left node, or up and to the right. */ + if (node->left) + node = node->left; + else + { + bin_tree_t *prev = NULL; + while (node->right == prev || node->right == NULL) + { + prev = node; + node = node->parent; + if (!node) + return REG_NOERROR; + } + node = node->right; + } + } +} + +/* Optimization pass: if a SUBEXP is entirely contained, strip it and tell + re_search_internal to map the inner one's opr.idx to this one's. Adjust + backreferences as well. Requires a preorder visit. 
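+   For example, in "((a*))b" the inner group always matches exactly
+   what the outer one matches, so the inner SUBEXP node is stripped
+   and subexp_map[] redirects its index to the surviving group; the
+   observable behaviour is unchanged.  A minimal sketch through the
+   POSIX-facing API built from this code (includes and error checks
+   omitted):
+
+	regex_t re;
+	regmatch_t m[3];
+	regcomp (&re, "((a*))b", REG_EXTENDED);
+	regexec (&re, "aaab", 3, m, 0);
+	assert (m[1].rm_so == m[2].rm_so && m[1].rm_eo == m[2].rm_eo);
+	regfree (&re);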
*/ +static reg_errcode_t +optimize_subexps (void *extra, bin_tree_t *node) +{ + re_dfa_t *dfa = (re_dfa_t *) extra; + + if (node->token.type == OP_BACK_REF && dfa->subexp_map) + { + int idx = node->token.opr.idx; + node->token.opr.idx = dfa->subexp_map[idx]; + dfa->used_bkref_map |= 1 << node->token.opr.idx; + } + + else if (node->token.type == SUBEXP + && node->left && node->left->token.type == SUBEXP) + { + int other_idx = node->left->token.opr.idx; + + node->left = node->left->left; + if (node->left) + node->left->parent = node; + + dfa->subexp_map[other_idx] = dfa->subexp_map[node->token.opr.idx]; + if (other_idx < BITSET_WORD_BITS) + dfa->used_bkref_map &= ~((bitset_word_t) 1 << other_idx); + } + + return REG_NOERROR; +} + +/* Lowering pass: Turn each SUBEXP node into the appropriate concatenation + of OP_OPEN_SUBEXP, the body of the SUBEXP (if any) and OP_CLOSE_SUBEXP. */ +static reg_errcode_t +lower_subexps (void *extra, bin_tree_t *node) +{ + regex_t *preg = (regex_t *) extra; + reg_errcode_t err = REG_NOERROR; + + if (node->left && node->left->token.type == SUBEXP) + { + node->left = lower_subexp (&err, preg, node->left); + if (node->left) + node->left->parent = node; + } + if (node->right && node->right->token.type == SUBEXP) + { + node->right = lower_subexp (&err, preg, node->right); + if (node->right) + node->right->parent = node; + } + + return err; +} + +static bin_tree_t * +lower_subexp (reg_errcode_t *err, regex_t *preg, bin_tree_t *node) +{ + re_dfa_t *dfa = (re_dfa_t *) preg->buffer; + bin_tree_t *body = node->left; + bin_tree_t *op, *cls, *tree1, *tree; + + if (preg->no_sub + /* We do not optimize empty subexpressions, because otherwise we may + have bad CONCAT nodes with NULL children. This is obviously not + very common, so we do not lose much. An example that triggers + this case is the sed "script" /\(\)/x. */ + && node->left != NULL + && (node->token.opr.idx >= BITSET_WORD_BITS + || !(dfa->used_bkref_map + & ((bitset_word_t) 1 << node->token.opr.idx)))) + return node->left; + + /* Convert the SUBEXP node to the concatenation of an + OP_OPEN_SUBEXP, the contents, and an OP_CLOSE_SUBEXP. */ + op = create_tree (dfa, NULL, NULL, OP_OPEN_SUBEXP); + cls = create_tree (dfa, NULL, NULL, OP_CLOSE_SUBEXP); + tree1 = body ? create_tree (dfa, body, cls, CONCAT) : cls; + tree = create_tree (dfa, op, tree1, CONCAT); + if (BE (tree == NULL || tree1 == NULL || op == NULL || cls == NULL, 0)) + { + *err = REG_ESPACE; + return NULL; + } + + op->token.opr.idx = cls->token.opr.idx = node->token.opr.idx; + op->token.opt_subexp = cls->token.opt_subexp = node->token.opt_subexp; + return tree; +} + +/* Pass 1 in building the NFA: compute FIRST and create unlinked automaton + nodes. Requires a postorder visit. */ +static reg_errcode_t +calc_first (void *extra, bin_tree_t *node) +{ + re_dfa_t *dfa = (re_dfa_t *) extra; + if (node->token.type == CONCAT) + { + node->first = node->left->first; + node->node_idx = node->left->node_idx; + } + else + { + node->first = node; + node->node_idx = re_dfa_add_node (dfa, node->token); + if (BE (node->node_idx == -1, 0)) + return REG_ESPACE; + if (node->token.type == ANCHOR) + dfa->nodes[node->node_idx].constraint = node->token.opr.ctx_type; + } + return REG_NOERROR; +} + +/* Pass 2: compute NEXT on the tree. Preorder visit. 
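+   For example, for "ab" the tree is CAT(a, b) with END_OF_RE
+   concatenated on top by parse (): pass 1 sets first(CAT) = a, and
+   this pass sets next(a) = b and next(b) = END_OF_RE, which is the
+   thread the matcher follows.  For OP_DUP_ASTERISK the body's next
+   pointer loops back to the asterisk node itself, giving the
+   repetition edge for free.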
*/
+static reg_errcode_t
+calc_next (void *extra, bin_tree_t *node)
+{
+  switch (node->token.type)
+    {
+    case OP_DUP_ASTERISK:
+      node->left->next = node;
+      break;
+    case CONCAT:
+      node->left->next = node->right->first;
+      node->right->next = node->next;
+      break;
+    default:
+      if (node->left)
+	node->left->next = node->next;
+      if (node->right)
+	node->right->next = node->next;
+      break;
+    }
+  return REG_NOERROR;
+}
+
+/* Pass 3: link all DFA nodes to their NEXT node (any order will do).  */
+static reg_errcode_t
+link_nfa_nodes (void *extra, bin_tree_t *node)
+{
+  re_dfa_t *dfa = (re_dfa_t *) extra;
+  int idx = node->node_idx;
+  reg_errcode_t err = REG_NOERROR;
+
+  switch (node->token.type)
+    {
+    case CONCAT:
+      break;
+
+    case END_OF_RE:
+      assert (node->next == NULL);
+      break;
+
+    case OP_DUP_ASTERISK:
+    case OP_ALT:
+      {
+	int left, right;
+	dfa->has_plural_match = 1;
+	if (node->left != NULL)
+	  left = node->left->first->node_idx;
+	else
+	  left = node->next->node_idx;
+	if (node->right != NULL)
+	  right = node->right->first->node_idx;
+	else
+	  right = node->next->node_idx;
+	assert (left > -1);
+	assert (right > -1);
+	err = re_node_set_init_2 (dfa->edests + idx, left, right);
+      }
+      break;
+
+    case ANCHOR:
+    case OP_OPEN_SUBEXP:
+    case OP_CLOSE_SUBEXP:
+      err = re_node_set_init_1 (dfa->edests + idx, node->next->node_idx);
+      break;
+
+    case OP_BACK_REF:
+      dfa->nexts[idx] = node->next->node_idx;
+      if (node->token.type == OP_BACK_REF)
+	err = re_node_set_init_1 (dfa->edests + idx, dfa->nexts[idx]);
+      break;
+
+    default:
+      assert (!IS_EPSILON_NODE (node->token.type));
+      dfa->nexts[idx] = node->next->node_idx;
+      break;
+    }
+
+  return err;
+}
+
+/* Duplicate the epsilon closure of the node ROOT_NODE.
+   Note that duplicated nodes have constraint INIT_CONSTRAINT in addition
+   to their own constraint.  */
+
+static reg_errcode_t
+internal_function
+duplicate_node_closure (re_dfa_t *dfa, int top_org_node, int top_clone_node,
+			int root_node, unsigned int init_constraint)
+{
+  int org_node, clone_node, ret;
+  unsigned int constraint = init_constraint;
+  for (org_node = top_org_node, clone_node = top_clone_node;;)
+    {
+      int org_dest, clone_dest;
+      if (dfa->nodes[org_node].type == OP_BACK_REF)
+	{
+	  /* If the back reference can epsilon-transit, its destination must
+	     also have the constraint.  Then duplicate the epsilon closure
+	     of the destination of the back reference, and store it in
+	     edests of the back reference.  */
+	  org_dest = dfa->nexts[org_node];
+	  re_node_set_empty (dfa->edests + clone_node);
+	  clone_dest = duplicate_node (dfa, org_dest, constraint);
+	  if (BE (clone_dest == -1, 0))
+	    return REG_ESPACE;
+	  dfa->nexts[clone_node] = dfa->nexts[org_node];
+	  ret = re_node_set_insert (dfa->edests + clone_node, clone_dest);
+	  if (BE (ret < 0, 0))
+	    return REG_ESPACE;
+	}
+      else if (dfa->edests[org_node].nelem == 0)
+	{
+	  /* If the node can't epsilon-transit, don't duplicate the
+	     destination; store the original destination as the
+	     destination of the node.  */
+	  dfa->nexts[clone_node] = dfa->nexts[org_node];
+	  break;
+	}
+      else if (dfa->edests[org_node].nelem == 1)
+	{
+	  /* Here the node can epsilon-transit and has a single
+	     destination.  */
+	  org_dest = dfa->edests[org_node].elems[0];
+	  re_node_set_empty (dfa->edests + clone_node);
+	  /* If the node is root_node itself, it means the epsilon closure
+	     has a loop.  Then tie it to the destination of the root_node.
*/ + if (org_node == root_node && clone_node != org_node) + { + ret = re_node_set_insert (dfa->edests + clone_node, org_dest); + if (BE (ret < 0, 0)) + return REG_ESPACE; + break; + } + /* In case of the node has another constraint, add it. */ + constraint |= dfa->nodes[org_node].constraint; + clone_dest = duplicate_node (dfa, org_dest, constraint); + if (BE (clone_dest == -1, 0)) + return REG_ESPACE; + ret = re_node_set_insert (dfa->edests + clone_node, clone_dest); + if (BE (ret < 0, 0)) + return REG_ESPACE; + } + else /* dfa->edests[org_node].nelem == 2 */ + { + /* In case of the node can epsilon-transit, and it has two + destinations. In the bin_tree_t and DFA, that's '|' and '*'. */ + org_dest = dfa->edests[org_node].elems[0]; + re_node_set_empty (dfa->edests + clone_node); + /* Search for a duplicated node which satisfies the constraint. */ + clone_dest = search_duplicated_node (dfa, org_dest, constraint); + if (clone_dest == -1) + { + /* There is no such duplicated node, create a new one. */ + reg_errcode_t err; + clone_dest = duplicate_node (dfa, org_dest, constraint); + if (BE (clone_dest == -1, 0)) + return REG_ESPACE; + ret = re_node_set_insert (dfa->edests + clone_node, clone_dest); + if (BE (ret < 0, 0)) + return REG_ESPACE; + err = duplicate_node_closure (dfa, org_dest, clone_dest, + root_node, constraint); + if (BE (err != REG_NOERROR, 0)) + return err; + } + else + { + /* There is a duplicated node which satisfies the constraint, + use it to avoid infinite loop. */ + ret = re_node_set_insert (dfa->edests + clone_node, clone_dest); + if (BE (ret < 0, 0)) + return REG_ESPACE; + } + + org_dest = dfa->edests[org_node].elems[1]; + clone_dest = duplicate_node (dfa, org_dest, constraint); + if (BE (clone_dest == -1, 0)) + return REG_ESPACE; + ret = re_node_set_insert (dfa->edests + clone_node, clone_dest); + if (BE (ret < 0, 0)) + return REG_ESPACE; + } + org_node = org_dest; + clone_node = clone_dest; + } + return REG_NOERROR; +} + +/* Search for a node which is duplicated from the node ORG_NODE, and + satisfies the constraint CONSTRAINT. */ + +static int +search_duplicated_node (const re_dfa_t *dfa, int org_node, + unsigned int constraint) +{ + int idx; + for (idx = dfa->nodes_len - 1; dfa->nodes[idx].duplicated && idx > 0; --idx) + { + if (org_node == dfa->org_indices[idx] + && constraint == dfa->nodes[idx].constraint) + return idx; /* Found. */ + } + return -1; /* Not found. */ +} + +/* Duplicate the node whose index is ORG_IDX and set the constraint CONSTRAINT. + Return the index of the new node, or -1 if insufficient storage is + available. */ + +static int +duplicate_node (re_dfa_t *dfa, int org_idx, unsigned int constraint) +{ + int dup_idx = re_dfa_add_node (dfa, dfa->nodes[org_idx]); + if (BE (dup_idx != -1, 1)) + { + dfa->nodes[dup_idx].constraint = constraint; + dfa->nodes[dup_idx].constraint |= dfa->nodes[org_idx].constraint; + dfa->nodes[dup_idx].duplicated = 1; + + /* Store the index of the original node. 
*/
+      dfa->org_indices[dup_idx] = org_idx;
+    }
+  return dup_idx;
+}
+
+static reg_errcode_t
+calc_inveclosure (re_dfa_t *dfa)
+{
+  int src, idx, ret;
+  for (idx = 0; idx < dfa->nodes_len; ++idx)
+    re_node_set_init_empty (dfa->inveclosures + idx);
+
+  for (src = 0; src < dfa->nodes_len; ++src)
+    {
+      int *elems = dfa->eclosures[src].elems;
+      for (idx = 0; idx < dfa->eclosures[src].nelem; ++idx)
+	{
+	  ret = re_node_set_insert_last (dfa->inveclosures + elems[idx], src);
+	  if (BE (ret == -1, 0))
+	    return REG_ESPACE;
+	}
+    }
+
+  return REG_NOERROR;
+}
+
+/* Calculate "eclosure" for all the nodes in the DFA.  */
+
+static reg_errcode_t
+calc_eclosure (re_dfa_t *dfa)
+{
+  int node_idx, incomplete;
+#ifdef DEBUG
+  assert (dfa->nodes_len > 0);
+#endif
+  incomplete = 0;
+  /* For each node, calculate its epsilon closure.  */
+  for (node_idx = 0; ; ++node_idx)
+    {
+      reg_errcode_t err;
+      re_node_set eclosure_elem;
+      if (node_idx == dfa->nodes_len)
+	{
+	  if (!incomplete)
+	    break;
+	  incomplete = 0;
+	  node_idx = 0;
+	}
+
+#ifdef DEBUG
+      assert (dfa->eclosures[node_idx].nelem != -1);
+#endif
+
+      /* If we have already calculated it, skip it.  */
+      if (dfa->eclosures[node_idx].nelem != 0)
+	continue;
+      /* Calculate the epsilon closure of `node_idx'.  */
+      err = calc_eclosure_iter (&eclosure_elem, dfa, node_idx, 1);
+      if (BE (err != REG_NOERROR, 0))
+	return err;
+
+      if (dfa->eclosures[node_idx].nelem == 0)
+	{
+	  incomplete = 1;
+	  re_node_set_free (&eclosure_elem);
+	}
+    }
+  return REG_NOERROR;
+}
+
+/* Calculate the epsilon closure of NODE.  */
+
+static reg_errcode_t
+calc_eclosure_iter (re_node_set *new_set, re_dfa_t *dfa, int node, int root)
+{
+  reg_errcode_t err;
+  int i;
+  re_node_set eclosure;
+  int ret;
+  int incomplete = 0;
+  err = re_node_set_alloc (&eclosure, dfa->edests[node].nelem + 1);
+  if (BE (err != REG_NOERROR, 0))
+    return err;
+
+  /* This indicates that we are calculating this node now.
+     We reference this value to avoid infinite loops.  */
+  dfa->eclosures[node].nelem = -1;
+
+  /* If the current node has constraints, duplicate all nodes
+     since they must inherit the constraints.  */
+  if (dfa->nodes[node].constraint
+      && dfa->edests[node].nelem
+      && !dfa->nodes[dfa->edests[node].elems[0]].duplicated)
+    {
+      err = duplicate_node_closure (dfa, node, node, node,
+				    dfa->nodes[node].constraint);
+      if (BE (err != REG_NOERROR, 0))
+	return err;
+    }
+
+  /* Expand each epsilon destination node.  */
+  if (IS_EPSILON_NODE(dfa->nodes[node].type))
+    for (i = 0; i < dfa->edests[node].nelem; ++i)
+      {
+	re_node_set eclosure_elem;
+	int edest = dfa->edests[node].elems[i];
+	/* If calculating the epsilon closure of `edest' is in progress,
+	   return an intermediate result.  */
+	if (dfa->eclosures[edest].nelem == -1)
+	  {
+	    incomplete = 1;
+	    continue;
+	  }
+	/* If we haven't calculated the epsilon closure of `edest' yet,
+	   calculate it now.  Otherwise use the calculated epsilon
+	   closure.  */
+	if (dfa->eclosures[edest].nelem == 0)
+	  {
+	    err = calc_eclosure_iter (&eclosure_elem, dfa, edest, 0);
+	    if (BE (err != REG_NOERROR, 0))
+	      return err;
+	  }
+	else
+	  eclosure_elem = dfa->eclosures[edest];
+	/* Merge the epsilon closure of `edest'.  */
+	err = re_node_set_merge (&eclosure, &eclosure_elem);
+	if (BE (err != REG_NOERROR, 0))
+	  return err;
+	/* If the epsilon closure of `edest' is incomplete,
+	   the epsilon closure of this node is also incomplete.  */
+	if (dfa->eclosures[edest].nelem == 0)
+	  {
+	    incomplete = 1;
+	    re_node_set_free (&eclosure_elem);
+	  }
+      }
+
+  /* An epsilon closure includes itself.
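+     For example, in "a|b" the OP_ALT node is an epsilon node whose
+     edests are the two CHARACTER nodes, so its closure is {ALT, a, b},
+     while the closures of the CHARACTER nodes are just the singletons
+     {a} and {b}.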
*/ + ret = re_node_set_insert (&eclosure, node); + if (BE (ret < 0, 0)) + return REG_ESPACE; + if (incomplete && !root) + dfa->eclosures[node].nelem = 0; + else + dfa->eclosures[node] = eclosure; + *new_set = eclosure; + return REG_NOERROR; +} + +/* Functions for token which are used in the parser. */ + +/* Fetch a token from INPUT. + We must not use this function inside bracket expressions. */ + +static void +internal_function +fetch_token (re_token_t *result, re_string_t *input, reg_syntax_t syntax) +{ + re_string_skip_bytes (input, peek_token (result, input, syntax)); +} + +/* Peek a token from INPUT, and return the length of the token. + We must not use this function inside bracket expressions. */ + +static int +internal_function +peek_token (re_token_t *token, re_string_t *input, reg_syntax_t syntax) +{ + unsigned char c; + + if (re_string_eoi (input)) + { + token->type = END_OF_RE; + return 0; + } + + c = re_string_peek_byte (input, 0); + token->opr.c = c; + + token->word_char = 0; +#ifdef RE_ENABLE_I18N + token->mb_partial = 0; + if (input->mb_cur_max > 1 && + !re_string_first_byte (input, re_string_cur_idx (input))) + { + token->type = CHARACTER; + token->mb_partial = 1; + return 1; + } +#endif + if (c == '\\') + { + unsigned char c2; + if (re_string_cur_idx (input) + 1 >= re_string_length (input)) + { + token->type = BACK_SLASH; + return 1; + } + + c2 = re_string_peek_byte_case (input, 1); + token->opr.c = c2; + token->type = CHARACTER; +#ifdef RE_ENABLE_I18N + if (input->mb_cur_max > 1) + { + wint_t wc = re_string_wchar_at (input, + re_string_cur_idx (input) + 1); + token->word_char = IS_WIDE_WORD_CHAR (wc) != 0; + } + else +#endif + token->word_char = IS_WORD_CHAR (c2) != 0; + + switch (c2) + { + case '|': + if (!(syntax & RE_LIMITED_OPS) && !(syntax & RE_NO_BK_VBAR)) + token->type = OP_ALT; + break; + case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + if (!(syntax & RE_NO_BK_REFS)) + { + token->type = OP_BACK_REF; + token->opr.idx = c2 - '1'; + } + break; + case '<': + if (!(syntax & RE_NO_GNU_OPS)) + { + token->type = ANCHOR; + token->opr.ctx_type = WORD_FIRST; + } + break; + case '>': + if (!(syntax & RE_NO_GNU_OPS)) + { + token->type = ANCHOR; + token->opr.ctx_type = WORD_LAST; + } + break; + case 'b': + if (!(syntax & RE_NO_GNU_OPS)) + { + token->type = ANCHOR; + token->opr.ctx_type = WORD_DELIM; + } + break; + case 'B': + if (!(syntax & RE_NO_GNU_OPS)) + { + token->type = ANCHOR; + token->opr.ctx_type = NOT_WORD_DELIM; + } + break; + case 'w': + if (!(syntax & RE_NO_GNU_OPS)) + token->type = OP_WORD; + break; + case 'W': + if (!(syntax & RE_NO_GNU_OPS)) + token->type = OP_NOTWORD; + break; + case 's': + if (!(syntax & RE_NO_GNU_OPS)) + token->type = OP_SPACE; + break; + case 'S': + if (!(syntax & RE_NO_GNU_OPS)) + token->type = OP_NOTSPACE; + break; + case '`': + if (!(syntax & RE_NO_GNU_OPS)) + { + token->type = ANCHOR; + token->opr.ctx_type = BUF_FIRST; + } + break; + case '\'': + if (!(syntax & RE_NO_GNU_OPS)) + { + token->type = ANCHOR; + token->opr.ctx_type = BUF_LAST; + } + break; + case '(': + if (!(syntax & RE_NO_BK_PARENS)) + token->type = OP_OPEN_SUBEXP; + break; + case ')': + if (!(syntax & RE_NO_BK_PARENS)) + token->type = OP_CLOSE_SUBEXP; + break; + case '+': + if (!(syntax & RE_LIMITED_OPS) && (syntax & RE_BK_PLUS_QM)) + token->type = OP_DUP_PLUS; + break; + case '?': + if (!(syntax & RE_LIMITED_OPS) && (syntax & RE_BK_PLUS_QM)) + token->type = OP_DUP_QUESTION; + break; + case '{': + if ((syntax & RE_INTERVALS) && 
(!(syntax & RE_NO_BK_BRACES))) + token->type = OP_OPEN_DUP_NUM; + break; + case '}': + if ((syntax & RE_INTERVALS) && (!(syntax & RE_NO_BK_BRACES))) + token->type = OP_CLOSE_DUP_NUM; + break; + default: + break; + } + return 2; + } + + token->type = CHARACTER; +#ifdef RE_ENABLE_I18N + if (input->mb_cur_max > 1) + { + wint_t wc = re_string_wchar_at (input, re_string_cur_idx (input)); + token->word_char = IS_WIDE_WORD_CHAR (wc) != 0; + } + else +#endif + token->word_char = IS_WORD_CHAR (token->opr.c); + + switch (c) + { + case '\n': + if (syntax & RE_NEWLINE_ALT) + token->type = OP_ALT; + break; + case '|': + if (!(syntax & RE_LIMITED_OPS) && (syntax & RE_NO_BK_VBAR)) + token->type = OP_ALT; + break; + case '*': + token->type = OP_DUP_ASTERISK; + break; + case '+': + if (!(syntax & RE_LIMITED_OPS) && !(syntax & RE_BK_PLUS_QM)) + token->type = OP_DUP_PLUS; + break; + case '?': + if (!(syntax & RE_LIMITED_OPS) && !(syntax & RE_BK_PLUS_QM)) + token->type = OP_DUP_QUESTION; + break; + case '{': + if ((syntax & RE_INTERVALS) && (syntax & RE_NO_BK_BRACES)) + token->type = OP_OPEN_DUP_NUM; + break; + case '}': + if ((syntax & RE_INTERVALS) && (syntax & RE_NO_BK_BRACES)) + token->type = OP_CLOSE_DUP_NUM; + break; + case '(': + if (syntax & RE_NO_BK_PARENS) + token->type = OP_OPEN_SUBEXP; + break; + case ')': + if (syntax & RE_NO_BK_PARENS) + token->type = OP_CLOSE_SUBEXP; + break; + case '[': + token->type = OP_OPEN_BRACKET; + break; + case '.': + token->type = OP_PERIOD; + break; + case '^': + if (!(syntax & (RE_CONTEXT_INDEP_ANCHORS | RE_CARET_ANCHORS_HERE)) && + re_string_cur_idx (input) != 0) + { + char prev = re_string_peek_byte (input, -1); + if (!(syntax & RE_NEWLINE_ALT) || prev != '\n') + break; + } + token->type = ANCHOR; + token->opr.ctx_type = LINE_FIRST; + break; + case '$': + if (!(syntax & RE_CONTEXT_INDEP_ANCHORS) && + re_string_cur_idx (input) + 1 != re_string_length (input)) + { + re_token_t next; + re_string_skip_bytes (input, 1); + peek_token (&next, input, syntax); + re_string_skip_bytes (input, -1); + if (next.type != OP_ALT && next.type != OP_CLOSE_SUBEXP) + break; + } + token->type = ANCHOR; + token->opr.ctx_type = LINE_LAST; + break; + default: + break; + } + return 1; +} + +/* Peek a token from INPUT, and return the length of the token. + We must not use this function out of bracket expressions. */ + +static int +internal_function +peek_token_bracket (re_token_t *token, re_string_t *input, reg_syntax_t syntax) +{ + unsigned char c; + if (re_string_eoi (input)) + { + token->type = END_OF_RE; + return 0; + } + c = re_string_peek_byte (input, 0); + token->opr.c = c; + +#ifdef RE_ENABLE_I18N + if (input->mb_cur_max > 1 && + !re_string_first_byte (input, re_string_cur_idx (input))) + { + token->type = CHARACTER; + return 1; + } +#endif /* RE_ENABLE_I18N */ + + if (c == '\\' && (syntax & RE_BACKSLASH_ESCAPE_IN_LISTS) + && re_string_cur_idx (input) + 1 < re_string_length (input)) + { + /* In this case, '\' escape a character. */ + unsigned char c2; + re_string_skip_bytes (input, 1); + c2 = re_string_peek_byte (input, 0); + token->opr.c = c2; + token->type = CHARACTER; + return 1; + } + if (c == '[') /* '[' is a special char in a bracket exps. 
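+     For example, "[[:digit:]]" peeks "[:" here and yields
+     OP_OPEN_CHAR_CLASS, "[[.a.]]" and "[[=a=]]" yield
+     OP_OPEN_COLL_ELEM and OP_OPEN_EQUIV_CLASS, while in "[a[]" the
+     second '[' is a plain CHARACTER.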
*/
+    {
+      unsigned char c2;
+      int token_len;
+      if (re_string_cur_idx (input) + 1 < re_string_length (input))
+	c2 = re_string_peek_byte (input, 1);
+      else
+	c2 = 0;
+      token->opr.c = c2;
+      token_len = 2;
+      switch (c2)
+	{
+	case '.':
+	  token->type = OP_OPEN_COLL_ELEM;
+	  break;
+	case '=':
+	  token->type = OP_OPEN_EQUIV_CLASS;
+	  break;
+	case ':':
+	  if (syntax & RE_CHAR_CLASSES)
+	    {
+	      token->type = OP_OPEN_CHAR_CLASS;
+	      break;
+	    }
+	  /* else fall through.  */
+	default:
+	  token->type = CHARACTER;
+	  token->opr.c = c;
+	  token_len = 1;
+	  break;
+	}
+      return token_len;
+    }
+  switch (c)
+    {
+    case '-':
+      token->type = OP_CHARSET_RANGE;
+      break;
+    case ']':
+      token->type = OP_CLOSE_BRACKET;
+      break;
+    case '^':
+      token->type = OP_NON_MATCH_LIST;
+      break;
+    default:
+      token->type = CHARACTER;
+    }
+  return 1;
+}
+
+/* Functions for parser.  */
+
+/* Entry point of the parser.
+   Parse the regular expression REGEXP and return the structure tree.
+   If an error occurs, ERR is set to the error code and NULL is
+   returned.
+   This function builds the following tree, from regular expression
+   <reg_exp>:
+	   CAT
+	   / \
+	  /   \
+   <reg_exp>  EOR
+
+   CAT means concatenation.
+   EOR means end of regular expression.  */
+
+static bin_tree_t *
+parse (re_string_t *regexp, regex_t *preg, reg_syntax_t syntax,
+       reg_errcode_t *err)
+{
+  re_dfa_t *dfa = (re_dfa_t *) preg->buffer;
+  bin_tree_t *tree, *eor, *root;
+  re_token_t current_token;
+  dfa->syntax = syntax;
+  fetch_token (&current_token, regexp, syntax | RE_CARET_ANCHORS_HERE);
+  tree = parse_reg_exp (regexp, preg, &current_token, syntax, 0, err);
+  if (BE (*err != REG_NOERROR && tree == NULL, 0))
+    return NULL;
+  eor = create_tree (dfa, NULL, NULL, END_OF_RE);
+  if (tree != NULL)
+    root = create_tree (dfa, tree, eor, CONCAT);
+  else
+    root = eor;
+  if (BE (eor == NULL || root == NULL, 0))
+    {
+      *err = REG_ESPACE;
+      return NULL;
+    }
+  return root;
+}
+
+/* This function builds the following tree, from regular expression
+   <branch1>|<branch2>:
+	   ALT
+	   / \
+	  /   \
+   <branch1> <branch2>
+
+   ALT means alternative, which represents the operator `|'.  */
+
+static bin_tree_t *
+parse_reg_exp (re_string_t *regexp, regex_t *preg, re_token_t *token,
+	       reg_syntax_t syntax, int nest, reg_errcode_t *err)
+{
+  re_dfa_t *dfa = (re_dfa_t *) preg->buffer;
+  bin_tree_t *tree, *branch = NULL;
+  tree = parse_branch (regexp, preg, token, syntax, nest, err);
+  if (BE (*err != REG_NOERROR && tree == NULL, 0))
+    return NULL;
+
+  while (token->type == OP_ALT)
+    {
+      fetch_token (token, regexp, syntax | RE_CARET_ANCHORS_HERE);
+      if (token->type != OP_ALT && token->type != END_OF_RE
+	  && (nest == 0 || token->type != OP_CLOSE_SUBEXP))
+	{
+	  branch = parse_branch (regexp, preg, token, syntax, nest, err);
+	  if (BE (*err != REG_NOERROR && branch == NULL, 0))
+	    return NULL;
+	}
+      else
+	branch = NULL;
+      tree = create_tree (dfa, tree, branch, OP_ALT);
+      if (BE (tree == NULL, 0))
+	{
+	  *err = REG_ESPACE;
+	  return NULL;
+	}
+    }
+  return tree;
+}
+
+/* This function builds the following tree, from regular expression
+   <exp1><exp2>:
+	   CAT
+	   / \
+	  /   \
+   <exp1>  <exp2>
+
+   CAT means concatenation.
*/
+
+static bin_tree_t *
+parse_branch (re_string_t *regexp, regex_t *preg, re_token_t *token,
+	      reg_syntax_t syntax, int nest, reg_errcode_t *err)
+{
+  bin_tree_t *tree, *exp;
+  re_dfa_t *dfa = (re_dfa_t *) preg->buffer;
+  tree = parse_expression (regexp, preg, token, syntax, nest, err);
+  if (BE (*err != REG_NOERROR && tree == NULL, 0))
+    return NULL;
+
+  while (token->type != OP_ALT && token->type != END_OF_RE
+	 && (nest == 0 || token->type != OP_CLOSE_SUBEXP))
+    {
+      exp = parse_expression (regexp, preg, token, syntax, nest, err);
+      if (BE (*err != REG_NOERROR && exp == NULL, 0))
+	{
+	  return NULL;
+	}
+      if (tree != NULL && exp != NULL)
+	{
+	  tree = create_tree (dfa, tree, exp, CONCAT);
+	  if (tree == NULL)
+	    {
+	      *err = REG_ESPACE;
+	      return NULL;
+	    }
+	}
+      else if (tree == NULL)
+	tree = exp;
+      /* Otherwise exp == NULL; we don't need to create a new tree.  */
+    }
+  return tree;
+}
+
+/* This function builds the following tree, from regular expression a*:
+	   *
+	   |
+	   a
+*/
+
+static bin_tree_t *
+parse_expression (re_string_t *regexp, regex_t *preg, re_token_t *token,
+		  reg_syntax_t syntax, int nest, reg_errcode_t *err)
+{
+  re_dfa_t *dfa = (re_dfa_t *) preg->buffer;
+  bin_tree_t *tree;
+  switch (token->type)
+    {
+    case CHARACTER:
+      tree = create_token_tree (dfa, NULL, NULL, token);
+      if (BE (tree == NULL, 0))
+	{
+	  *err = REG_ESPACE;
+	  return NULL;
+	}
+#ifdef RE_ENABLE_I18N
+      if (dfa->mb_cur_max > 1)
+	{
+	  while (!re_string_eoi (regexp)
+		 && !re_string_first_byte (regexp, re_string_cur_idx (regexp)))
+	    {
+	      bin_tree_t *mbc_remain;
+	      fetch_token (token, regexp, syntax);
+	      mbc_remain = create_token_tree (dfa, NULL, NULL, token);
+	      tree = create_tree (dfa, tree, mbc_remain, CONCAT);
+	      if (BE (mbc_remain == NULL || tree == NULL, 0))
+		{
+		  *err = REG_ESPACE;
+		  return NULL;
+		}
+	    }
+	}
+#endif
+      break;
+    case OP_OPEN_SUBEXP:
+      tree = parse_sub_exp (regexp, preg, token, syntax, nest + 1, err);
+      if (BE (*err != REG_NOERROR && tree == NULL, 0))
+	return NULL;
+      break;
+    case OP_OPEN_BRACKET:
+      tree = parse_bracket_exp (regexp, dfa, token, syntax, err);
+      if (BE (*err != REG_NOERROR && tree == NULL, 0))
+	return NULL;
+      break;
+    case OP_BACK_REF:
+      if (!BE (dfa->completed_bkref_map & (1 << token->opr.idx), 1))
+	{
+	  *err = REG_ESUBREG;
+	  return NULL;
+	}
+      dfa->used_bkref_map |= 1 << token->opr.idx;
+      tree = create_token_tree (dfa, NULL, NULL, token);
+      if (BE (tree == NULL, 0))
+	{
+	  *err = REG_ESPACE;
+	  return NULL;
+	}
+      ++dfa->nbackref;
+      dfa->has_mb_node = 1;
+      break;
+    case OP_OPEN_DUP_NUM:
+      if (syntax & RE_CONTEXT_INVALID_DUP)
+	{
+	  *err = REG_BADRPT;
+	  return NULL;
+	}
+      /* FALLTHROUGH */
+    case OP_DUP_ASTERISK:
+    case OP_DUP_PLUS:
+    case OP_DUP_QUESTION:
+      if (syntax & RE_CONTEXT_INVALID_OPS)
+	{
+	  *err = REG_BADRPT;
+	  return NULL;
+	}
+      else if (syntax & RE_CONTEXT_INDEP_OPS)
+	{
+	  fetch_token (token, regexp, syntax);
+	  return parse_expression (regexp, preg, token, syntax, nest, err);
+	}
+      /* else fall through */
+    case OP_CLOSE_SUBEXP:
+      if ((token->type == OP_CLOSE_SUBEXP) &&
+	  !(syntax & RE_UNMATCHED_RIGHT_PAREN_ORD))
+	{
+	  *err = REG_ERPAREN;
+	  return NULL;
+	}
+      /* else fall through */
+    case OP_CLOSE_DUP_NUM:
+      /* We treat it as a normal character and can then handle it like
+	 any other literal.  */
+      token->type = CHARACTER;
+      /* mb_partial and word_char bits should be initialized already
+	 by peek_token.
*/ + tree = create_token_tree (dfa, NULL, NULL, token); + if (BE (tree == NULL, 0)) + { + *err = REG_ESPACE; + return NULL; + } + break; + case ANCHOR: + if ((token->opr.ctx_type + & (WORD_DELIM | NOT_WORD_DELIM | WORD_FIRST | WORD_LAST)) + && dfa->word_ops_used == 0) + init_word_char (dfa); + if (token->opr.ctx_type == WORD_DELIM + || token->opr.ctx_type == NOT_WORD_DELIM) + { + bin_tree_t *tree_first, *tree_last; + if (token->opr.ctx_type == WORD_DELIM) + { + token->opr.ctx_type = WORD_FIRST; + tree_first = create_token_tree (dfa, NULL, NULL, token); + token->opr.ctx_type = WORD_LAST; + } + else + { + token->opr.ctx_type = INSIDE_WORD; + tree_first = create_token_tree (dfa, NULL, NULL, token); + token->opr.ctx_type = INSIDE_NOTWORD; + } + tree_last = create_token_tree (dfa, NULL, NULL, token); + tree = create_tree (dfa, tree_first, tree_last, OP_ALT); + if (BE (tree_first == NULL || tree_last == NULL || tree == NULL, 0)) + { + *err = REG_ESPACE; + return NULL; + } + } + else + { + tree = create_token_tree (dfa, NULL, NULL, token); + if (BE (tree == NULL, 0)) + { + *err = REG_ESPACE; + return NULL; + } + } + /* We must return here, since ANCHORs can't be followed + by repetition operators. + eg. RE"^*" is invalid or "<ANCHOR(^)><CHAR(*)>", + it must not be "<ANCHOR(^)><REPEAT(*)>". */ + fetch_token (token, regexp, syntax); + return tree; + case OP_PERIOD: + tree = create_token_tree (dfa, NULL, NULL, token); + if (BE (tree == NULL, 0)) + { + *err = REG_ESPACE; + return NULL; + } + if (dfa->mb_cur_max > 1) + dfa->has_mb_node = 1; + break; + case OP_WORD: + case OP_NOTWORD: + tree = build_charclass_op (dfa, regexp->trans, + "alnum", + "_", + token->type == OP_NOTWORD, err); + if (BE (*err != REG_NOERROR && tree == NULL, 0)) + return NULL; + break; + case OP_SPACE: + case OP_NOTSPACE: + tree = build_charclass_op (dfa, regexp->trans, + "space", + "", + token->type == OP_NOTSPACE, err); + if (BE (*err != REG_NOERROR && tree == NULL, 0)) + return NULL; + break; + case OP_ALT: + case END_OF_RE: + return NULL; + case BACK_SLASH: + *err = REG_EESCAPE; + return NULL; + default: + /* Must not happen? */ +#ifdef DEBUG + assert (0); +#endif + return NULL; + } + fetch_token (token, regexp, syntax); + + while (token->type == OP_DUP_ASTERISK || token->type == OP_DUP_PLUS + || token->type == OP_DUP_QUESTION || token->type == OP_OPEN_DUP_NUM) + { + tree = parse_dup_op (tree, regexp, dfa, token, syntax, err); + if (BE (*err != REG_NOERROR && tree == NULL, 0)) + return NULL; + /* In BRE consecutive duplications are not allowed. */ + if ((syntax & RE_CONTEXT_INVALID_DUP) + && (token->type == OP_DUP_ASTERISK + || token->type == OP_OPEN_DUP_NUM)) + { + *err = REG_BADRPT; + return NULL; + } + } + + return tree; +} + +/* This function build the following tree, from regular expression + (<reg_exp>): + SUBEXP + | + <reg_exp> +*/ + +static bin_tree_t * +parse_sub_exp (re_string_t *regexp, regex_t *preg, re_token_t *token, + reg_syntax_t syntax, int nest, reg_errcode_t *err) +{ + re_dfa_t *dfa = (re_dfa_t *) preg->buffer; + bin_tree_t *tree; + size_t cur_nsub; + cur_nsub = preg->re_nsub++; + + fetch_token (token, regexp, syntax | RE_CARET_ANCHORS_HERE); + + /* The subexpression may be a null string. 
*/ + if (token->type == OP_CLOSE_SUBEXP) + tree = NULL; + else + { + tree = parse_reg_exp (regexp, preg, token, syntax, nest, err); + if (BE (*err == REG_NOERROR && token->type != OP_CLOSE_SUBEXP, 0)) + *err = REG_EPAREN; + if (BE (*err != REG_NOERROR, 0)) + return NULL; + } + + if (cur_nsub <= '9' - '1') + dfa->completed_bkref_map |= 1 << cur_nsub; + + tree = create_tree (dfa, tree, NULL, SUBEXP); + if (BE (tree == NULL, 0)) + { + *err = REG_ESPACE; + return NULL; + } + tree->token.opr.idx = cur_nsub; + return tree; +} + +/* This function parse repetition operators like "*", "+", "{1,3}" etc. */ + +static bin_tree_t * +parse_dup_op (bin_tree_t *elem, re_string_t *regexp, re_dfa_t *dfa, + re_token_t *token, reg_syntax_t syntax, reg_errcode_t *err) +{ + bin_tree_t *tree = NULL, *old_tree = NULL; + int i, start, end, start_idx = re_string_cur_idx (regexp); +#ifndef RE_TOKEN_INIT_BUG + re_token_t start_token = *token; +#else + re_token_t start_token; + + memcpy ((void *) &start_token, (void *) token, sizeof start_token); +#endif + + if (token->type == OP_OPEN_DUP_NUM) + { + end = 0; + start = fetch_number (regexp, token, syntax); + if (start == -1) + { + if (token->type == CHARACTER && token->opr.c == ',') + start = 0; /* We treat "{,m}" as "{0,m}". */ + else + { + *err = REG_BADBR; /* <re>{} is invalid. */ + return NULL; + } + } + if (BE (start != -2, 1)) + { + /* We treat "{n}" as "{n,n}". */ + end = ((token->type == OP_CLOSE_DUP_NUM) ? start + : ((token->type == CHARACTER && token->opr.c == ',') + ? fetch_number (regexp, token, syntax) : -2)); + } + if (BE (start == -2 || end == -2, 0)) + { + /* Invalid sequence. */ + if (BE (!(syntax & RE_INVALID_INTERVAL_ORD), 0)) + { + if (token->type == END_OF_RE) + *err = REG_EBRACE; + else + *err = REG_BADBR; + + return NULL; + } + + /* If the syntax bit is set, rollback. */ + re_string_set_index (regexp, start_idx); + *token = start_token; + token->type = CHARACTER; + /* mb_partial and word_char bits should be already initialized by + peek_token. */ + return elem; + } + + if (BE ((end != -1 && start > end) || token->type != OP_CLOSE_DUP_NUM, 0)) + { + /* First number greater than second. */ + *err = REG_BADBR; + return NULL; + } + } + else + { + start = (token->type == OP_DUP_PLUS) ? 1 : 0; + end = (token->type == OP_DUP_QUESTION) ? 1 : -1; + } + + fetch_token (token, regexp, syntax); + + if (BE (elem == NULL, 0)) + return NULL; + if (BE (start == 0 && end == 0, 0)) + { + postorder (elem, free_tree, NULL); + return NULL; + } + + /* Extract "<re>{n,m}" to "<re><re>...<re><re>{0,<m-n>}". */ + if (BE (start > 0, 0)) + { + tree = elem; + for (i = 2; i <= start; ++i) + { + elem = duplicate_tree (elem, dfa); + tree = create_tree (dfa, tree, elem, CONCAT); + if (BE (elem == NULL || tree == NULL, 0)) + goto parse_dup_op_espace; + } + + if (start == end) + return tree; + + /* Duplicate ELEM before it is marked optional. */ + elem = duplicate_tree (elem, dfa); + old_tree = tree; + } + else + old_tree = NULL; + + if (elem->token.type == SUBEXP) + postorder (elem, mark_opt_subexp, (void *) (long) elem->token.opr.idx); + + tree = create_tree (dfa, elem, NULL, (end == -1 ? OP_DUP_ASTERISK : OP_ALT)); + if (BE (tree == NULL, 0)) + goto parse_dup_op_espace; + + /* This loop is actually executed only when end != -1, + to rewrite <re>{0,n} as (<re>(<re>...<re>?)?)?... We have + already created the start+1-th copy. 
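+     For example, "x{2,4}" is built as the mandatory prefix "xx"
+     followed by an optional tail that can consume up to two more
+     copies, so the construct accepts exactly 2, 3 or 4 x's; "x{0,2}"
+     skips the prefix entirely and becomes nested optional copies.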
*/ + for (i = start + 2; i <= end; ++i) + { + elem = duplicate_tree (elem, dfa); + tree = create_tree (dfa, tree, elem, CONCAT); + if (BE (elem == NULL || tree == NULL, 0)) + goto parse_dup_op_espace; + + tree = create_tree (dfa, tree, NULL, OP_ALT); + if (BE (tree == NULL, 0)) + goto parse_dup_op_espace; + } + + if (old_tree) + tree = create_tree (dfa, old_tree, tree, CONCAT); + + return tree; + + parse_dup_op_espace: + *err = REG_ESPACE; + return NULL; +} + +/* Size of the names for collating symbol/equivalence_class/character_class. + I'm not sure, but maybe enough. */ +#define BRACKET_NAME_BUF_SIZE 32 + +#ifndef _LIBC + /* Local function for parse_bracket_exp only used in case of NOT _LIBC. + Build the range expression which starts from START_ELEM, and ends + at END_ELEM. The result are written to MBCSET and SBCSET. + RANGE_ALLOC is the allocated size of mbcset->range_starts, and + mbcset->range_ends, is a pointer argument since we may + update it. */ + +static reg_errcode_t +internal_function +# ifdef RE_ENABLE_I18N +build_range_exp (bitset_t sbcset, re_charset_t *mbcset, int *range_alloc, + bracket_elem_t *start_elem, bracket_elem_t *end_elem) +# else /* not RE_ENABLE_I18N */ +build_range_exp (bitset_t sbcset, bracket_elem_t *start_elem, + bracket_elem_t *end_elem) +# endif /* not RE_ENABLE_I18N */ +{ + unsigned int start_ch, end_ch; + /* Equivalence Classes and Character Classes can't be a range start/end. */ + if (BE (start_elem->type == EQUIV_CLASS || start_elem->type == CHAR_CLASS + || end_elem->type == EQUIV_CLASS || end_elem->type == CHAR_CLASS, + 0)) + return REG_ERANGE; + + /* We can handle no multi character collating elements without libc + support. */ + if (BE ((start_elem->type == COLL_SYM + && strlen ((char *) start_elem->opr.name) > 1) + || (end_elem->type == COLL_SYM + && strlen ((char *) end_elem->opr.name) > 1), 0)) + return REG_ECOLLATE; + +# ifdef RE_ENABLE_I18N + { + wchar_t wc; + wint_t start_wc; + wint_t end_wc; + wchar_t cmp_buf[6] = {L'\0', L'\0', L'\0', L'\0', L'\0', L'\0'}; + + start_ch = ((start_elem->type == SB_CHAR) ? start_elem->opr.ch + : ((start_elem->type == COLL_SYM) ? start_elem->opr.name[0] + : 0)); + end_ch = ((end_elem->type == SB_CHAR) ? end_elem->opr.ch + : ((end_elem->type == COLL_SYM) ? end_elem->opr.name[0] + : 0)); +#ifdef GAWK + /* + * Fedora Core 2, maybe others, have broken `btowc' that returns -1 + * for any value > 127. Sigh. Note that `start_ch' and `end_ch' are + * unsigned, so we don't have sign extension problems. + */ + start_wc = ((start_elem->type == SB_CHAR || start_elem->type == COLL_SYM) + ? start_ch : start_elem->opr.wch); + end_wc = ((end_elem->type == SB_CHAR || end_elem->type == COLL_SYM) + ? end_ch : end_elem->opr.wch); +#else + start_wc = ((start_elem->type == SB_CHAR || start_elem->type == COLL_SYM) + ? __btowc (start_ch) : start_elem->opr.wch); + end_wc = ((end_elem->type == SB_CHAR || end_elem->type == COLL_SYM) + ? __btowc (end_ch) : end_elem->opr.wch); +#endif + if (start_wc == WEOF || end_wc == WEOF) + return REG_ECOLLATE; + cmp_buf[0] = start_wc; + cmp_buf[4] = end_wc; + if (wcscoll (cmp_buf, cmp_buf + 4) > 0) + return REG_ERANGE; + + /* Got valid collation sequence values, add them as a new entry. + However, for !_LIBC we have no collation elements: if the + character set is single byte, the single byte character set + that we build below suffices. parse_bracket_exp passes + no MBCSET if dfa->mb_cur_max == 1. */ + if (mbcset) + { + /* Check the space of the arrays. 
*/
+	if (BE (*range_alloc == mbcset->nranges, 0))
+	  {
+	    /* There is not enough space, need realloc.  */
+	    wchar_t *new_array_start, *new_array_end;
+	    int new_nranges;
+
+	    /* +1 in case of mbcset->nranges is 0.  */
+	    new_nranges = 2 * mbcset->nranges + 1;
+	    /* Use realloc since mbcset->range_starts and mbcset->range_ends
+	       are NULL if *range_alloc == 0.  */
+	    new_array_start = re_realloc (mbcset->range_starts, wchar_t,
+					  new_nranges);
+	    new_array_end = re_realloc (mbcset->range_ends, wchar_t,
+					new_nranges);
+
+	    if (BE (new_array_start == NULL || new_array_end == NULL, 0))
+	      return REG_ESPACE;
+
+	    mbcset->range_starts = new_array_start;
+	    mbcset->range_ends = new_array_end;
+	    *range_alloc = new_nranges;
+	  }
+
+	mbcset->range_starts[mbcset->nranges] = start_wc;
+	mbcset->range_ends[mbcset->nranges++] = end_wc;
+      }
+
+    /* Build the table for single byte characters.  */
+    for (wc = 0; wc < SBC_MAX; ++wc)
+      {
+	cmp_buf[2] = wc;
+	if (wcscoll (cmp_buf, cmp_buf + 2) <= 0
+	    && wcscoll (cmp_buf + 2, cmp_buf + 4) <= 0)
+	  bitset_set (sbcset, wc);
+      }
+  }
+# else /* not RE_ENABLE_I18N */
+  {
+    unsigned int ch;
+    start_ch = ((start_elem->type == SB_CHAR) ? start_elem->opr.ch
+		: ((start_elem->type == COLL_SYM) ? start_elem->opr.name[0]
+		   : 0));
+    end_ch = ((end_elem->type == SB_CHAR) ? end_elem->opr.ch
+	      : ((end_elem->type == COLL_SYM) ? end_elem->opr.name[0]
+		 : 0));
+    if (start_ch > end_ch)
+      return REG_ERANGE;
+    /* Build the table for single byte characters.  */
+    for (ch = 0; ch < SBC_MAX; ++ch)
+      if (start_ch <= ch && ch <= end_ch)
+	bitset_set (sbcset, ch);
+  }
+# endif /* not RE_ENABLE_I18N */
+  return REG_NOERROR;
+}
+#endif /* not _LIBC */
+
+#ifndef _LIBC
+/* Helper function for parse_bracket_exp, used only if not _LIBC.
+   Build the collating element which is represented by NAME.
+   The results are written to MBCSET and SBCSET.
+   COLL_SYM_ALLOC is the allocated size of mbcset->coll_syms; it is a
+   pointer argument since we may update it.  */
+
+static reg_errcode_t
+internal_function
+# ifdef RE_ENABLE_I18N
+build_collating_symbol (bitset_t sbcset, re_charset_t *mbcset,
+			int *coll_sym_alloc, const unsigned char *name)
+# else /* not RE_ENABLE_I18N */
+build_collating_symbol (bitset_t sbcset, const unsigned char *name)
+# endif /* not RE_ENABLE_I18N */
+{
+  size_t name_len = strlen ((const char *) name);
+  if (BE (name_len != 1, 0))
+    return REG_ECOLLATE;
+  else
+    {
+      bitset_set (sbcset, name[0]);
+      return REG_NOERROR;
+    }
+}
+#endif /* not _LIBC */
+
+/* This function parses bracket expressions like "[abc]", "[a-c]",
+   "[[.a-a.]]" etc.  */
+
+static bin_tree_t *
+parse_bracket_exp (re_string_t *regexp, re_dfa_t *dfa, re_token_t *token,
+		   reg_syntax_t syntax, reg_errcode_t *err)
+{
+#ifdef _LIBC
+  const unsigned char *collseqmb;
+  const char *collseqwc;
+  uint32_t nrules;
+  int32_t table_size;
+  const int32_t *symb_table;
+  const unsigned char *extra;
+
+  /* Local function for parse_bracket_exp used in _LIBC environment.
+     Seek the collating symbol entry corresponding to NAME.
+     Return the index of the symbol in the SYMB_TABLE.  */
+
+  auto inline int32_t
+  __attribute ((always_inline))
+  seek_collating_symbol_entry (name, name_len)
+	 const unsigned char *name;
+	 size_t name_len;
+    {
+      int32_t hash = elem_hash ((const char *) name, name_len);
+      int32_t elem = hash % table_size;
+      if (symb_table[2 * elem] != 0)
+	{
+	  int32_t second = hash % (table_size - 2) + 1;
+
+	  do
+	    {
+	      /* First compare the hashing value.  */
+	      if (symb_table[2 * elem] == hash
+		  /* Compare the length of the name.
*/ + && name_len == extra[symb_table[2 * elem + 1]] + /* Compare the name. */ + && memcmp (name, &extra[symb_table[2 * elem + 1] + 1], + name_len) == 0) + { + /* Yep, this is the entry. */ + break; + } + + /* Next entry. */ + elem += second; + } + while (symb_table[2 * elem] != 0); + } + return elem; + } + + /* Local function for parse_bracket_exp used in _LIBC environment. + Look up the collation sequence value of BR_ELEM. + Return the value if succeeded, UINT_MAX otherwise. */ + + auto inline unsigned int + __attribute ((always_inline)) + lookup_collation_sequence_value (br_elem) + bracket_elem_t *br_elem; + { + if (br_elem->type == SB_CHAR) + { + /* + if (MB_CUR_MAX == 1) + */ + if (nrules == 0) + return collseqmb[br_elem->opr.ch]; + else + { + wint_t wc = __btowc (br_elem->opr.ch); + return __collseq_table_lookup (collseqwc, wc); + } + } + else if (br_elem->type == MB_CHAR) + { + if (nrules != 0) + return __collseq_table_lookup (collseqwc, br_elem->opr.wch); + } + else if (br_elem->type == COLL_SYM) + { + size_t sym_name_len = strlen ((char *) br_elem->opr.name); + if (nrules != 0) + { + int32_t elem, idx; + elem = seek_collating_symbol_entry (br_elem->opr.name, + sym_name_len); + if (symb_table[2 * elem] != 0) + { + /* We found the entry. */ + idx = symb_table[2 * elem + 1]; + /* Skip the name of collating element name. */ + idx += 1 + extra[idx]; + /* Skip the byte sequence of the collating element. */ + idx += 1 + extra[idx]; + /* Adjust for the alignment. */ + idx = (idx + 3) & ~3; + /* Skip the multibyte collation sequence value. */ + idx += sizeof (unsigned int); + /* Skip the wide char sequence of the collating element. */ + idx += sizeof (unsigned int) * + (1 + *(unsigned int *) (extra + idx)); + /* Return the collation sequence value. */ + return *(unsigned int *) (extra + idx); + } + else if (symb_table[2 * elem] == 0 && sym_name_len == 1) + { + /* No valid character. Match it as a single byte + character. */ + return collseqmb[br_elem->opr.name[0]]; + } + } + else if (sym_name_len == 1) + return collseqmb[br_elem->opr.name[0]]; + } + return UINT_MAX; + } + + /* Local function for parse_bracket_exp used in _LIBC environment. + Build the range expression which starts from START_ELEM, and ends + at END_ELEM. The result are written to MBCSET and SBCSET. + RANGE_ALLOC is the allocated size of mbcset->range_starts, and + mbcset->range_ends, is a pointer argument since we may + update it. */ + + auto inline reg_errcode_t + __attribute ((always_inline)) + build_range_exp (sbcset, mbcset, range_alloc, start_elem, end_elem) + re_charset_t *mbcset; + int *range_alloc; + bitset_t sbcset; + bracket_elem_t *start_elem, *end_elem; + { + unsigned int ch; + uint32_t start_collseq; + uint32_t end_collseq; + + /* Equivalence Classes and Character Classes can't be a range + start/end. */ + if (BE (start_elem->type == EQUIV_CLASS || start_elem->type == CHAR_CLASS + || end_elem->type == EQUIV_CLASS || end_elem->type == CHAR_CLASS, + 0)) + return REG_ERANGE; + + start_collseq = lookup_collation_sequence_value (start_elem); + end_collseq = lookup_collation_sequence_value (end_elem); + /* Check start/end collation sequence values. */ + if (BE (start_collseq == UINT_MAX || end_collseq == UINT_MAX, 0)) + return REG_ECOLLATE; + if (BE ((syntax & RE_NO_EMPTY_RANGES) && start_collseq > end_collseq, 0)) + return REG_ERANGE; + + /* Got valid collation sequence values, add them as a new entry. 
+ However, if we have no collation elements, and the character set + is single byte, the single byte character set that we + build below suffices. */ + if (nrules > 0 || dfa->mb_cur_max > 1) + { + /* Check the space of the arrays. */ + if (BE (*range_alloc == mbcset->nranges, 0)) + { + /* There is not enough space, need realloc. */ + uint32_t *new_array_start; + uint32_t *new_array_end; + int new_nranges; + + /* +1 in case of mbcset->nranges is 0. */ + new_nranges = 2 * mbcset->nranges + 1; + new_array_start = re_realloc (mbcset->range_starts, uint32_t, + new_nranges); + new_array_end = re_realloc (mbcset->range_ends, uint32_t, + new_nranges); + + if (BE (new_array_start == NULL || new_array_end == NULL, 0)) + return REG_ESPACE; + + mbcset->range_starts = new_array_start; + mbcset->range_ends = new_array_end; + *range_alloc = new_nranges; + } + + mbcset->range_starts[mbcset->nranges] = start_collseq; + mbcset->range_ends[mbcset->nranges++] = end_collseq; + } + + /* Build the table for single byte characters. */ + for (ch = 0; ch < SBC_MAX; ch++) + { + uint32_t ch_collseq; + /* + if (MB_CUR_MAX == 1) + */ + if (nrules == 0) + ch_collseq = collseqmb[ch]; + else + ch_collseq = __collseq_table_lookup (collseqwc, __btowc (ch)); + if (start_collseq <= ch_collseq && ch_collseq <= end_collseq) + bitset_set (sbcset, ch); + } + return REG_NOERROR; + } + + /* Local function for parse_bracket_exp used in _LIBC environment. + Build the collating element which is represented by NAME. + The result are written to MBCSET and SBCSET. + COLL_SYM_ALLOC is the allocated size of mbcset->coll_sym, is a + pointer argument since we may update it. */ + + auto inline reg_errcode_t + __attribute ((always_inline)) + build_collating_symbol (sbcset, mbcset, coll_sym_alloc, name) + re_charset_t *mbcset; + int *coll_sym_alloc; + bitset_t sbcset; + const unsigned char *name; + { + int32_t elem, idx; + size_t name_len = strlen ((const char *) name); + if (nrules != 0) + { + elem = seek_collating_symbol_entry (name, name_len); + if (symb_table[2 * elem] != 0) + { + /* We found the entry. */ + idx = symb_table[2 * elem + 1]; + /* Skip the name of collating element name. */ + idx += 1 + extra[idx]; + } + else if (symb_table[2 * elem] == 0 && name_len == 1) + { + /* No valid character, treat it as a normal + character. */ + bitset_set (sbcset, name[0]); + return REG_NOERROR; + } + else + return REG_ECOLLATE; + + /* Got valid collation sequence, add it as a new entry. */ + /* Check the space of the arrays. */ + if (BE (*coll_sym_alloc == mbcset->ncoll_syms, 0)) + { + /* Not enough, realloc it. */ + /* +1 in case of mbcset->ncoll_syms is 0. */ + int new_coll_sym_alloc = 2 * mbcset->ncoll_syms + 1; + /* Use realloc since mbcset->coll_syms is NULL + if *alloc == 0. 
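+	 (C guarantees that realloc (NULL, n) behaves like malloc (n),
+	 which is what makes this lazy-allocation idiom safe; the same
+	 "2 * n + 1" growth pattern recurs for the other dynamic arrays
+	 in this parser.)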
*/ + int32_t *new_coll_syms = re_realloc (mbcset->coll_syms, int32_t, + new_coll_sym_alloc); + if (BE (new_coll_syms == NULL, 0)) + return REG_ESPACE; + mbcset->coll_syms = new_coll_syms; + *coll_sym_alloc = new_coll_sym_alloc; + } + mbcset->coll_syms[mbcset->ncoll_syms++] = idx; + return REG_NOERROR; + } + else + { + if (BE (name_len != 1, 0)) + return REG_ECOLLATE; + else + { + bitset_set (sbcset, name[0]); + return REG_NOERROR; + } + } + } +#endif + + re_token_t br_token; + re_bitset_ptr_t sbcset; +#ifdef RE_ENABLE_I18N + re_charset_t *mbcset; + int coll_sym_alloc = 0, range_alloc = 0, mbchar_alloc = 0; + int equiv_class_alloc = 0, char_class_alloc = 0; +#endif /* not RE_ENABLE_I18N */ + int non_match = 0; + bin_tree_t *work_tree; + int token_len; + int first_round = 1; +#ifdef _LIBC + collseqmb = (const unsigned char *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_COLLSEQMB); + nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); + if (nrules) + { + /* + if (MB_CUR_MAX > 1) + */ + collseqwc = _NL_CURRENT (LC_COLLATE, _NL_COLLATE_COLLSEQWC); + table_size = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_SYMB_HASH_SIZEMB); + symb_table = (const int32_t *) _NL_CURRENT (LC_COLLATE, + _NL_COLLATE_SYMB_TABLEMB); + extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, + _NL_COLLATE_SYMB_EXTRAMB); + } +#endif + sbcset = (re_bitset_ptr_t) calloc (sizeof (bitset_t), 1); +#ifdef RE_ENABLE_I18N + mbcset = (re_charset_t *) calloc (sizeof (re_charset_t), 1); +#endif /* RE_ENABLE_I18N */ +#ifdef RE_ENABLE_I18N + if (BE (sbcset == NULL || mbcset == NULL, 0)) +#else + if (BE (sbcset == NULL, 0)) +#endif /* RE_ENABLE_I18N */ + { + *err = REG_ESPACE; + return NULL; + } + + token_len = peek_token_bracket (token, regexp, syntax); + if (BE (token->type == END_OF_RE, 0)) + { + *err = REG_BADPAT; + goto parse_bracket_exp_free_return; + } + if (token->type == OP_NON_MATCH_LIST) + { +#ifdef RE_ENABLE_I18N + mbcset->non_match = 1; +#endif /* not RE_ENABLE_I18N */ + non_match = 1; + if (syntax & RE_HAT_LISTS_NOT_NEWLINE) + bitset_set (sbcset, '\n'); + re_string_skip_bytes (regexp, token_len); /* Skip a token. */ + token_len = peek_token_bracket (token, regexp, syntax); + if (BE (token->type == END_OF_RE, 0)) + { + *err = REG_BADPAT; + goto parse_bracket_exp_free_return; + } + } + + /* We treat the first ']' as a normal character. */ + if (token->type == OP_CLOSE_BRACKET) + token->type = CHARACTER; + + while (1) + { + bracket_elem_t start_elem, end_elem; + unsigned char start_name_buf[BRACKET_NAME_BUF_SIZE]; + unsigned char end_name_buf[BRACKET_NAME_BUF_SIZE]; + reg_errcode_t ret; + int token_len2 = 0, is_range_exp = 0; + re_token_t token2; + + start_elem.opr.name = start_name_buf; + ret = parse_bracket_element (&start_elem, regexp, token, token_len, dfa, + syntax, first_round); + if (BE (ret != REG_NOERROR, 0)) + { + *err = ret; + goto parse_bracket_exp_free_return; + } + first_round = 0; + + /* Get information about the next token. We need it in any case. */ + token_len = peek_token_bracket (token, regexp, syntax); + + /* Do not check for ranges if we know they are not allowed. */ + if (start_elem.type != CHAR_CLASS && start_elem.type != EQUIV_CLASS) + { + if (BE (token->type == END_OF_RE, 0)) + { + *err = REG_EBRACK; + goto parse_bracket_exp_free_return; + } + if (token->type == OP_CHARSET_RANGE) + { + re_string_skip_bytes (regexp, token_len); /* Skip '-'. 
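The branch that follows implements the POSIX rule that a '-' standing immediately before the closing ']' is an ordinary character rather than a range operator. The effect is visible through the public interface; a small check, assuming this compat regex is what gets linked in:

#include <regex.h>
#include <stdio.h>

/* "[a-]" contains a trailing '-', so it matches the two literal
   characters 'a' and '-', not a range. */
int main(void)
{
    regex_t re;
    if (regcomp(&re, "[a-]", REG_EXTENDED | REG_NOSUB) != 0)
        return 1;
    printf("'-' %s\n", regexec(&re, "-", 0, NULL, 0) == 0
                       ? "matches" : "does not match");   /* matches */
    printf("'b' %s\n", regexec(&re, "b", 0, NULL, 0) == 0
                       ? "matches" : "does not match");   /* does not match */
    regfree(&re);
    return 0;
}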
*/ + token_len2 = peek_token_bracket (&token2, regexp, syntax); + if (BE (token2.type == END_OF_RE, 0)) + { + *err = REG_EBRACK; + goto parse_bracket_exp_free_return; + } + if (token2.type == OP_CLOSE_BRACKET) + { + /* We treat the last '-' as a normal character. */ + re_string_skip_bytes (regexp, -token_len); + token->type = CHARACTER; + } + else + is_range_exp = 1; + } + } + + if (is_range_exp == 1) + { + end_elem.opr.name = end_name_buf; + ret = parse_bracket_element (&end_elem, regexp, &token2, token_len2, + dfa, syntax, 1); + if (BE (ret != REG_NOERROR, 0)) + { + *err = ret; + goto parse_bracket_exp_free_return; + } + + token_len = peek_token_bracket (token, regexp, syntax); + +#ifdef _LIBC + *err = build_range_exp (sbcset, mbcset, &range_alloc, + &start_elem, &end_elem); +#else +# ifdef RE_ENABLE_I18N + *err = build_range_exp (sbcset, + dfa->mb_cur_max > 1 ? mbcset : NULL, + &range_alloc, &start_elem, &end_elem); +# else + *err = build_range_exp (sbcset, &start_elem, &end_elem); +# endif +#endif /* RE_ENABLE_I18N */ + if (BE (*err != REG_NOERROR, 0)) + goto parse_bracket_exp_free_return; + } + else + { + switch (start_elem.type) + { + case SB_CHAR: + bitset_set (sbcset, start_elem.opr.ch); + break; +#ifdef RE_ENABLE_I18N + case MB_CHAR: + /* Check whether the array has enough space. */ + if (BE (mbchar_alloc == mbcset->nmbchars, 0)) + { + wchar_t *new_mbchars; + /* Not enough, realloc it. */ + /* +1 in case of mbcset->nmbchars is 0. */ + mbchar_alloc = 2 * mbcset->nmbchars + 1; + /* Use realloc since array is NULL if *alloc == 0. */ + new_mbchars = re_realloc (mbcset->mbchars, wchar_t, + mbchar_alloc); + if (BE (new_mbchars == NULL, 0)) + goto parse_bracket_exp_espace; + mbcset->mbchars = new_mbchars; + } + mbcset->mbchars[mbcset->nmbchars++] = start_elem.opr.wch; + break; +#endif /* RE_ENABLE_I18N */ + case EQUIV_CLASS: + *err = build_equiv_class (sbcset, +#ifdef RE_ENABLE_I18N + mbcset, &equiv_class_alloc, +#endif /* RE_ENABLE_I18N */ + start_elem.opr.name); + if (BE (*err != REG_NOERROR, 0)) + goto parse_bracket_exp_free_return; + break; + case COLL_SYM: + *err = build_collating_symbol (sbcset, +#ifdef RE_ENABLE_I18N + mbcset, &coll_sym_alloc, +#endif /* RE_ENABLE_I18N */ + start_elem.opr.name); + if (BE (*err != REG_NOERROR, 0)) + goto parse_bracket_exp_free_return; + break; + case CHAR_CLASS: + *err = build_charclass (regexp->trans, sbcset, +#ifdef RE_ENABLE_I18N + mbcset, &char_class_alloc, +#endif /* RE_ENABLE_I18N */ + (const char *) start_elem.opr.name, syntax); + if (BE (*err != REG_NOERROR, 0)) + goto parse_bracket_exp_free_return; + break; + default: + assert (0); + break; + } + } + if (BE (token->type == END_OF_RE, 0)) + { + *err = REG_EBRACK; + goto parse_bracket_exp_free_return; + } + if (token->type == OP_CLOSE_BRACKET) + break; + } + + re_string_skip_bytes (regexp, token_len); /* Skip a token. */ + + /* If it is non-matching list. */ + if (non_match) + bitset_not (sbcset); + +#ifdef RE_ENABLE_I18N + /* Ensure only single byte characters are set. */ + if (dfa->mb_cur_max > 1) + bitset_mask (sbcset, dfa->sb_char); + + if (mbcset->nmbchars || mbcset->ncoll_syms || mbcset->nequiv_classes + || mbcset->nranges || (dfa->mb_cur_max > 1 && (mbcset->nchar_classes + || mbcset->non_match))) + { + bin_tree_t *mbc_tree; + int sbc_idx; + /* Build a tree for complex bracket. 
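When multibyte data is needed, the compiler pairs the COMPLEX_BRACKET node built below with a SIMPLE_BRACKET only if the single-byte bitset has at least one bit set; the test is a plain word-by-word scan. The same check in isolation, assuming bitset_t amounts to an array of unsigned long words covering the 256 single-byte characters, as in regex_internal.h:

#include <limits.h>
#include <stddef.h>

#define SBC_MAX_DEMO   256
#define WORD_BITS      (sizeof (unsigned long) * CHAR_BIT)
#define BITSET_WORDS_DEMO ((SBC_MAX_DEMO + WORD_BITS - 1) / WORD_BITS)

/* Return 1 if no bit is set, mirroring the sbc_idx loop below:
   an empty bitset means the SIMPLE_BRACKET node adds nothing and
   the COMPLEX_BRACKET can stand alone. */
static int bitset_is_empty(const unsigned long *set)
{
    size_t i;
    for (i = 0; i < BITSET_WORDS_DEMO; ++i)
        if (set[i])
            return 0;   /* a set bit: keep the simple bracket too */
    return 1;           /* all zero: the multibyte node suffices */
}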
*/ + dfa->has_mb_node = 1; + br_token.type = COMPLEX_BRACKET; + br_token.opr.mbcset = mbcset; + mbc_tree = create_token_tree (dfa, NULL, NULL, &br_token); + if (BE (mbc_tree == NULL, 0)) + goto parse_bracket_exp_espace; + for (sbc_idx = 0; sbc_idx < BITSET_WORDS; ++sbc_idx) + if (sbcset[sbc_idx]) + break; + /* If there are no bits set in sbcset, there is no point + of having both SIMPLE_BRACKET and COMPLEX_BRACKET. */ + if (sbc_idx < BITSET_WORDS) + { + /* Build a tree for simple bracket. */ + br_token.type = SIMPLE_BRACKET; + br_token.opr.sbcset = sbcset; + work_tree = create_token_tree (dfa, NULL, NULL, &br_token); + if (BE (work_tree == NULL, 0)) + goto parse_bracket_exp_espace; + + /* Then join them by ALT node. */ + work_tree = create_tree (dfa, work_tree, mbc_tree, OP_ALT); + if (BE (work_tree == NULL, 0)) + goto parse_bracket_exp_espace; + } + else + { + re_free (sbcset); + work_tree = mbc_tree; + } + } + else +#endif /* not RE_ENABLE_I18N */ + { +#ifdef RE_ENABLE_I18N + free_charset (mbcset); +#endif + /* Build a tree for simple bracket. */ + br_token.type = SIMPLE_BRACKET; + br_token.opr.sbcset = sbcset; + work_tree = create_token_tree (dfa, NULL, NULL, &br_token); + if (BE (work_tree == NULL, 0)) + goto parse_bracket_exp_espace; + } + return work_tree; + + parse_bracket_exp_espace: + *err = REG_ESPACE; + parse_bracket_exp_free_return: + re_free (sbcset); +#ifdef RE_ENABLE_I18N + free_charset (mbcset); +#endif /* RE_ENABLE_I18N */ + return NULL; +} + +/* Parse an element in the bracket expression. */ + +static reg_errcode_t +parse_bracket_element (bracket_elem_t *elem, re_string_t *regexp, + re_token_t *token, int token_len, re_dfa_t *dfa, + reg_syntax_t syntax, int accept_hyphen) +{ +#ifdef RE_ENABLE_I18N + int cur_char_size; + cur_char_size = re_string_char_size_at (regexp, re_string_cur_idx (regexp)); + if (cur_char_size > 1) + { + elem->type = MB_CHAR; + elem->opr.wch = re_string_wchar_at (regexp, re_string_cur_idx (regexp)); + re_string_skip_bytes (regexp, cur_char_size); + return REG_NOERROR; + } +#endif /* RE_ENABLE_I18N */ + re_string_skip_bytes (regexp, token_len); /* Skip a token. */ + if (token->type == OP_OPEN_COLL_ELEM || token->type == OP_OPEN_CHAR_CLASS + || token->type == OP_OPEN_EQUIV_CLASS) + return parse_bracket_symbol (elem, regexp, token); + if (BE (token->type == OP_CHARSET_RANGE, 0) && !accept_hyphen) + { + /* A '-' must only appear as anything but a range indicator before + the closing bracket. Everything else is an error. */ + re_token_t token2; + (void) peek_token_bracket (&token2, regexp, syntax); + if (token2.type != OP_CLOSE_BRACKET) + /* The actual error value is not standardized since this whole + case is undefined. But ERANGE makes good sense. */ + return REG_ERANGE; + } + elem->type = SB_CHAR; + elem->opr.ch = token->opr.c; + return REG_NOERROR; +} + +/* Parse a bracket symbol in the bracket expression. Bracket symbols are + such as [:<character_class>:], [.<collating_element>.], and + [=<equivalent_class>=]. 
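The three bracket-symbol forms named in the comment above look like this at the pattern level; a quick compile test through the POSIX entry points declared in regex.h (illustrative only):

#include <regex.h>
#include <stdio.h>

int main(void)
{
    regex_t re;
    const char *pats[] = {
        "[[:digit:]]+",   /* character class: one or more digits      */
        "[[.-.]]",        /* collating element: a literal '-'         */
        "[[=a=]]",        /* equivalence class: whatever collates     */
                          /* like 'a' in the current locale           */
    };
    size_t i;
    for (i = 0; i < sizeof pats / sizeof pats[0]; ++i)
    {
        int rc = regcomp(&re, pats[i], REG_EXTENDED | REG_NOSUB);
        printf("%-14s -> %s\n", pats[i], rc == 0 ? "compiles" : "error");
        if (rc == 0)
            regfree(&re);
    }
    return 0;
}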
*/ + +static reg_errcode_t +parse_bracket_symbol (bracket_elem_t *elem, re_string_t *regexp, + re_token_t *token) +{ + unsigned char ch, delim = token->opr.c; + int i = 0; + if (re_string_eoi(regexp)) + return REG_EBRACK; + for (;; ++i) + { + if (i >= BRACKET_NAME_BUF_SIZE) + return REG_EBRACK; + if (token->type == OP_OPEN_CHAR_CLASS) + ch = re_string_fetch_byte_case (regexp); + else + ch = re_string_fetch_byte (regexp); + if (re_string_eoi(regexp)) + return REG_EBRACK; + if (ch == delim && re_string_peek_byte (regexp, 0) == ']') + break; + elem->opr.name[i] = ch; + } + re_string_skip_bytes (regexp, 1); + elem->opr.name[i] = '\0'; + switch (token->type) + { + case OP_OPEN_COLL_ELEM: + elem->type = COLL_SYM; + break; + case OP_OPEN_EQUIV_CLASS: + elem->type = EQUIV_CLASS; + break; + case OP_OPEN_CHAR_CLASS: + elem->type = CHAR_CLASS; + break; + default: + break; + } + return REG_NOERROR; +} + + /* Helper function for parse_bracket_exp. + Build the equivalence class which is represented by NAME. + The result are written to MBCSET and SBCSET. + EQUIV_CLASS_ALLOC is the allocated size of mbcset->equiv_classes, + is a pointer argument since we may update it. */ + +static reg_errcode_t +#ifdef RE_ENABLE_I18N +build_equiv_class (bitset_t sbcset, re_charset_t *mbcset, + int *equiv_class_alloc, const unsigned char *name) +#else /* not RE_ENABLE_I18N */ +build_equiv_class (bitset_t sbcset, const unsigned char *name) +#endif /* not RE_ENABLE_I18N */ +{ +#ifdef _LIBC + uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); + if (nrules != 0) + { + const int32_t *table, *indirect; + const unsigned char *weights, *extra, *cp; + unsigned char char_buf[2]; + int32_t idx1, idx2; + unsigned int ch; + size_t len; + /* This #include defines a local function! */ +# include <locale/weight.h> + /* Calculate the index for equivalence class. */ + cp = name; + table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEMB); + weights = (const unsigned char *) _NL_CURRENT (LC_COLLATE, + _NL_COLLATE_WEIGHTMB); + extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, + _NL_COLLATE_EXTRAMB); + indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, + _NL_COLLATE_INDIRECTMB); + idx1 = findidx (&cp); + if (BE (idx1 == 0 || cp < name + strlen ((const char *) name), 0)) + /* This isn't a valid character. */ + return REG_ECOLLATE; + + /* Build single byte matcing table for this equivalence class. */ + char_buf[1] = (unsigned char) '\0'; + len = weights[idx1 & 0xffffff]; + for (ch = 0; ch < SBC_MAX; ++ch) + { + char_buf[0] = ch; + cp = char_buf; + idx2 = findidx (&cp); +/* + idx2 = table[ch]; +*/ + if (idx2 == 0) + /* This isn't a valid character. */ + continue; + /* Compare only if the length matches and the collation rule + index is the same. */ + if (len == weights[idx2 & 0xffffff] && (idx1 >> 24) == (idx2 >> 24)) + { + int cnt = 0; + + while (cnt <= len && + weights[(idx1 & 0xffffff) + 1 + cnt] + == weights[(idx2 & 0xffffff) + 1 + cnt]) + ++cnt; + + if (cnt > len) + bitset_set (sbcset, ch); + } + } + /* Check whether the array has enough space. */ + if (BE (*equiv_class_alloc == mbcset->nequiv_classes, 0)) + { + /* Not enough, realloc it. */ + /* +1 in case of mbcset->nequiv_classes is 0. */ + int new_equiv_class_alloc = 2 * mbcset->nequiv_classes + 1; + /* Use realloc since the array is NULL if *alloc == 0. 
*/ + int32_t *new_equiv_classes = re_realloc (mbcset->equiv_classes, + int32_t, + new_equiv_class_alloc); + if (BE (new_equiv_classes == NULL, 0)) + return REG_ESPACE; + mbcset->equiv_classes = new_equiv_classes; + *equiv_class_alloc = new_equiv_class_alloc; + } + mbcset->equiv_classes[mbcset->nequiv_classes++] = idx1; + } + else +#endif /* _LIBC */ + { + if (BE (strlen ((const char *) name) != 1, 0)) + return REG_ECOLLATE; + bitset_set (sbcset, *name); + } + return REG_NOERROR; +} + + /* Helper function for parse_bracket_exp. + Build the character class which is represented by NAME. + The result are written to MBCSET and SBCSET. + CHAR_CLASS_ALLOC is the allocated size of mbcset->char_classes, + is a pointer argument since we may update it. */ + +static reg_errcode_t +#ifdef RE_ENABLE_I18N +build_charclass (RE_TRANSLATE_TYPE trans, bitset_t sbcset, + re_charset_t *mbcset, int *char_class_alloc, + const char *class_name, reg_syntax_t syntax) +#else /* not RE_ENABLE_I18N */ +build_charclass (RE_TRANSLATE_TYPE trans, bitset_t sbcset, + const char *class_name, reg_syntax_t syntax) +#endif /* not RE_ENABLE_I18N */ +{ + int i; + + /* In case of REG_ICASE "upper" and "lower" match the both of + upper and lower cases. */ + if ((syntax & RE_ICASE) + && (strcmp (class_name, "upper") == 0 || strcmp (class_name, "lower") == 0)) + class_name = "alpha"; + +#ifdef RE_ENABLE_I18N + /* Check the space of the arrays. */ + if (BE (*char_class_alloc == mbcset->nchar_classes, 0)) + { + /* Not enough, realloc it. */ + /* +1 in case of mbcset->nchar_classes is 0. */ + int new_char_class_alloc = 2 * mbcset->nchar_classes + 1; + /* Use realloc since array is NULL if *alloc == 0. */ + wctype_t *new_char_classes = re_realloc (mbcset->char_classes, wctype_t, + new_char_class_alloc); + if (BE (new_char_classes == NULL, 0)) + return REG_ESPACE; + mbcset->char_classes = new_char_classes; + *char_class_alloc = new_char_class_alloc; + } + mbcset->char_classes[mbcset->nchar_classes++] = __wctype (class_name); +#endif /* RE_ENABLE_I18N */ + +#define BUILD_CHARCLASS_LOOP(ctype_func) \ + do { \ + if (BE (trans != NULL, 0)) \ + { \ + for (i = 0; i < SBC_MAX; ++i) \ + if (ctype_func (i)) \ + bitset_set (sbcset, trans[i]); \ + } \ + else \ + { \ + for (i = 0; i < SBC_MAX; ++i) \ + if (ctype_func (i)) \ + bitset_set (sbcset, i); \ + } \ + } while (0) + + if (strcmp (class_name, "alnum") == 0) + BUILD_CHARCLASS_LOOP (isalnum); + else if (strcmp (class_name, "cntrl") == 0) + BUILD_CHARCLASS_LOOP (iscntrl); + else if (strcmp (class_name, "lower") == 0) + BUILD_CHARCLASS_LOOP (islower); + else if (strcmp (class_name, "space") == 0) + BUILD_CHARCLASS_LOOP (isspace); + else if (strcmp (class_name, "alpha") == 0) + BUILD_CHARCLASS_LOOP (isalpha); + else if (strcmp (class_name, "digit") == 0) + BUILD_CHARCLASS_LOOP (isdigit); + else if (strcmp (class_name, "print") == 0) + BUILD_CHARCLASS_LOOP (isprint); + else if (strcmp (class_name, "upper") == 0) + BUILD_CHARCLASS_LOOP (isupper); + else if (strcmp (class_name, "blank") == 0) +#ifndef GAWK + BUILD_CHARCLASS_LOOP (isblank); +#else + /* see comments above */ + BUILD_CHARCLASS_LOOP (is_blank); +#endif + else if (strcmp (class_name, "graph") == 0) + BUILD_CHARCLASS_LOOP (isgraph); + else if (strcmp (class_name, "punct") == 0) + BUILD_CHARCLASS_LOOP (ispunct); + else if (strcmp (class_name, "xdigit") == 0) + BUILD_CHARCLASS_LOOP (isxdigit); + else + return REG_ECTYPE; + + return REG_NOERROR; +} + +static bin_tree_t * +build_charclass_op (re_dfa_t *dfa, RE_TRANSLATE_TYPE trans, + const char 
*class_name, + const char *extra, int non_match, + reg_errcode_t *err) +{ + re_bitset_ptr_t sbcset; +#ifdef RE_ENABLE_I18N + re_charset_t *mbcset; + int alloc = 0; +#endif /* not RE_ENABLE_I18N */ + reg_errcode_t ret; + re_token_t br_token; + bin_tree_t *tree; + + sbcset = (re_bitset_ptr_t) calloc (sizeof (bitset_t), 1); +#ifdef RE_ENABLE_I18N + mbcset = (re_charset_t *) calloc (sizeof (re_charset_t), 1); +#endif /* RE_ENABLE_I18N */ + +#ifdef RE_ENABLE_I18N + if (BE (sbcset == NULL || mbcset == NULL, 0)) +#else /* not RE_ENABLE_I18N */ + if (BE (sbcset == NULL, 0)) +#endif /* not RE_ENABLE_I18N */ + { + *err = REG_ESPACE; + return NULL; + } + + if (non_match) + { +#ifdef RE_ENABLE_I18N + mbcset->non_match = 1; +#endif /* not RE_ENABLE_I18N */ + } + + /* We don't care the syntax in this case. */ + ret = build_charclass (trans, sbcset, +#ifdef RE_ENABLE_I18N + mbcset, &alloc, +#endif /* RE_ENABLE_I18N */ + class_name, 0); + + if (BE (ret != REG_NOERROR, 0)) + { + re_free (sbcset); +#ifdef RE_ENABLE_I18N + free_charset (mbcset); +#endif /* RE_ENABLE_I18N */ + *err = ret; + return NULL; + } + /* \w match '_' also. */ + for (; *extra; extra++) + bitset_set (sbcset, *extra); + + /* If it is non-matching list. */ + if (non_match) + bitset_not (sbcset); + +#ifdef RE_ENABLE_I18N + /* Ensure only single byte characters are set. */ + if (dfa->mb_cur_max > 1) + bitset_mask (sbcset, dfa->sb_char); +#endif + + /* Build a tree for simple bracket. */ + br_token.type = SIMPLE_BRACKET; + br_token.opr.sbcset = sbcset; + tree = create_token_tree (dfa, NULL, NULL, &br_token); + if (BE (tree == NULL, 0)) + goto build_word_op_espace; + +#ifdef RE_ENABLE_I18N + if (dfa->mb_cur_max > 1) + { + bin_tree_t *mbc_tree; + /* Build a tree for complex bracket. */ + br_token.type = COMPLEX_BRACKET; + br_token.opr.mbcset = mbcset; + dfa->has_mb_node = 1; + mbc_tree = create_token_tree (dfa, NULL, NULL, &br_token); + if (BE (mbc_tree == NULL, 0)) + goto build_word_op_espace; + /* Then join them by ALT node. */ + tree = create_tree (dfa, tree, mbc_tree, OP_ALT); + if (BE (mbc_tree != NULL, 1)) + return tree; + } + else + { + free_charset (mbcset); + return tree; + } +#else /* not RE_ENABLE_I18N */ + return tree; +#endif /* not RE_ENABLE_I18N */ + + build_word_op_espace: + re_free (sbcset); +#ifdef RE_ENABLE_I18N + free_charset (mbcset); +#endif /* RE_ENABLE_I18N */ + *err = REG_ESPACE; + return NULL; +} + +/* This is intended for the expressions like "a{1,3}". + Fetch a number from `input', and return the number. + Return -1, if the number field is empty like "{,1}". + Return -2, if an error has occurred. */ + +static int +fetch_number (re_string_t *input, re_token_t *token, reg_syntax_t syntax) +{ + int num = -1; + unsigned char c; + while (1) + { + fetch_token (token, input, syntax); + c = token->opr.c; + if (BE (token->type == END_OF_RE, 0)) + return -2; + if (token->type == OP_CLOSE_DUP_NUM || c == ',') + break; + num = ((token->type != CHARACTER || c < '0' || '9' < c || num == -2) + ? -2 : ((num == -1) ? c - '0' : num * 10 + c - '0')); + num = (num > RE_DUP_MAX) ? -2 : num; + } + return num; +} + +#ifdef RE_ENABLE_I18N +static void +free_charset (re_charset_t *cset) +{ + re_free (cset->mbchars); +# ifdef _LIBC + re_free (cset->coll_syms); + re_free (cset->equiv_classes); + re_free (cset->range_starts); + re_free (cset->range_ends); +# endif + re_free (cset->char_classes); + re_free (cset); +} +#endif /* RE_ENABLE_I18N */ + +/* Functions for binary tree operation. */ + +/* Create a tree node. 
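create_token_tree, defined next, does not malloc each node: nodes come from fixed-size storage blocks chained through dfa->str_tree_storage, so the whole parse tree is released block by block rather than node by node. A stripped-down version of that arena; STORAGE_SIZE, struct block, and arena_alloc are illustrative stand-ins, not the source's names:

#include <stdlib.h>

#define STORAGE_SIZE 64                /* nodes per block */

struct node  { int dummy; };           /* stand-in for bin_tree_t */
struct block { struct block *next; struct node data[STORAGE_SIZE]; };
struct arena { struct block *head; int idx; };

/* Hand out the next slot, pushing a fresh block onto the chain
   when the current one is full, like create_token_tree below. */
static struct node *arena_alloc(struct arena *a)
{
    if (a->head == NULL || a->idx == STORAGE_SIZE)
    {
        struct block *b = malloc(sizeof *b);
        if (b == NULL)
            return NULL;
        b->next = a->head;
        a->head = b;
        a->idx = 0;
    }
    return &a->head->data[a->idx++];
}

/* One free() per block, regardless of how many nodes were used. */
static void arena_free(struct arena *a)
{
    while (a->head)
    {
        struct block *next = a->head->next;
        free(a->head);
        a->head = next;
    }
}

A caller starts from `struct arena a = { NULL, 0 };` and calls arena_free once at the end, which is exactly why free_tree below only has to release per-node payloads, never the nodes themselves.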
*/ + +static bin_tree_t * +create_tree (re_dfa_t *dfa, bin_tree_t *left, bin_tree_t *right, + re_token_type_t type) +{ + re_token_t t; + t.type = type; + return create_token_tree (dfa, left, right, &t); +} + +static bin_tree_t * +create_token_tree (re_dfa_t *dfa, bin_tree_t *left, bin_tree_t *right, + const re_token_t *token) +{ + bin_tree_t *tree; + if (BE (dfa->str_tree_storage_idx == BIN_TREE_STORAGE_SIZE, 0)) + { + bin_tree_storage_t *storage = re_malloc (bin_tree_storage_t, 1); + + if (storage == NULL) + return NULL; + storage->next = dfa->str_tree_storage; + dfa->str_tree_storage = storage; + dfa->str_tree_storage_idx = 0; + } + tree = &dfa->str_tree_storage->data[dfa->str_tree_storage_idx++]; + + tree->parent = NULL; + tree->left = left; + tree->right = right; + tree->token = *token; + tree->token.duplicated = 0; + tree->token.opt_subexp = 0; + tree->first = NULL; + tree->next = NULL; + tree->node_idx = -1; + + if (left != NULL) + left->parent = tree; + if (right != NULL) + right->parent = tree; + return tree; +} + +/* Mark the tree SRC as an optional subexpression. + To be called from preorder or postorder. */ + +static reg_errcode_t +mark_opt_subexp (void *extra, bin_tree_t *node) +{ + int idx = (int) (long) extra; + if (node->token.type == SUBEXP && node->token.opr.idx == idx) + node->token.opt_subexp = 1; + + return REG_NOERROR; +} + +/* Free the allocated memory inside NODE. */ + +static void +free_token (re_token_t *node) +{ +#ifdef RE_ENABLE_I18N + if (node->type == COMPLEX_BRACKET && node->duplicated == 0) + free_charset (node->opr.mbcset); + else +#endif /* RE_ENABLE_I18N */ + if (node->type == SIMPLE_BRACKET && node->duplicated == 0) + re_free (node->opr.sbcset); +} + +/* Worker function for tree walking. Free the allocated memory inside NODE + and its children. */ + +static reg_errcode_t +free_tree (void *extra, bin_tree_t *node) +{ + free_token (&node->token); + return REG_NOERROR; +} + + +/* Duplicate the node SRC, and return new node. This is a preorder + visit similar to the one implemented by the generic visitor, but + we need more infrastructure to maintain two parallel trees --- so, + it's easier to duplicate. */ + +static bin_tree_t * +duplicate_tree (const bin_tree_t *root, re_dfa_t *dfa) +{ + const bin_tree_t *node; + bin_tree_t *dup_root; + bin_tree_t **p_new = &dup_root, *dup_node = root->parent; + + for (node = root; ; ) + { + /* Create a new tree and link it back to the current parent. */ + *p_new = create_token_tree (dfa, NULL, NULL, &node->token); + if (*p_new == NULL) + return NULL; + (*p_new)->parent = dup_node; + (*p_new)->token.duplicated = 1; + dup_node = *p_new; + + /* Go to the left node, or up and to the right. */ + if (node->left) + { + node = node->left; + p_new = &dup_node->left; + } + else + { + const bin_tree_t *prev = NULL; + while (node->right == prev || node->right == NULL) + { + prev = node; + node = node->parent; + dup_node = dup_node->parent; + if (!node) + return dup_root; + } + node = node->right; + p_new = &dup_node->right; + } + } +} diff --git a/compat/regex/regex.c b/compat/regex/regex.c new file mode 100644 index 0000000000..6aaae00327 --- /dev/null +++ b/compat/regex/regex.c @@ -0,0 +1,87 @@ +/* Extended regular expression matching and search library. + Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Isamu Hasegawa <isamu@yamato.ibm.com>. 
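duplicate_tree above copies a tree without an explicit stack: it descends left when it can and otherwise climbs parent pointers until an unvisited right child turns up. The same traversal skeleton reduced to a node counter; the types and names are illustrative, not from the source:

struct tnode { struct tnode *left, *right, *parent; };

/* Count the nodes of the subtree at ROOT (non-NULL) with the same
   stackless preorder walk duplicate_tree uses: go left, else climb
   via parent pointers until a right child exists that has not been
   visited yet. */
static int count_nodes(const struct tnode *root)
{
    const struct tnode *node = root;
    int n = 0;

    for (;;)
    {
        ++n;
        if (node->left)
            node = node->left;
        else
        {
            const struct tnode *prev = NULL;
            while (node->right == prev || node->right == NULL)
            {
                prev = node;
                node = node->parent;
                if (node == NULL || prev == root)
                    return n;   /* climbed off the subtree: done */
            }
            node = node->right;
        }
    }
}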
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301 USA. */ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +/* Make sure no one compiles this code with a C++ compiler. */ +#ifdef __cplusplus +# error "This is C code, use a C compiler" +#endif + +#ifdef _LIBC +/* We have to keep the namespace clean. */ +# define regfree(preg) __regfree (preg) +# define regexec(pr, st, nm, pm, ef) __regexec (pr, st, nm, pm, ef) +# define regcomp(preg, pattern, cflags) __regcomp (preg, pattern, cflags) +# define regerror(errcode, preg, errbuf, errbuf_size) \ + __regerror(errcode, preg, errbuf, errbuf_size) +# define re_set_registers(bu, re, nu, st, en) \ + __re_set_registers (bu, re, nu, st, en) +# define re_match_2(bufp, string1, size1, string2, size2, pos, regs, stop) \ + __re_match_2 (bufp, string1, size1, string2, size2, pos, regs, stop) +# define re_match(bufp, string, size, pos, regs) \ + __re_match (bufp, string, size, pos, regs) +# define re_search(bufp, string, size, startpos, range, regs) \ + __re_search (bufp, string, size, startpos, range, regs) +# define re_compile_pattern(pattern, length, bufp) \ + __re_compile_pattern (pattern, length, bufp) +# define re_set_syntax(syntax) __re_set_syntax (syntax) +# define re_search_2(bufp, st1, s1, st2, s2, startpos, range, regs, stop) \ + __re_search_2 (bufp, st1, s1, st2, s2, startpos, range, regs, stop) +# define re_compile_fastmap(bufp) __re_compile_fastmap (bufp) + +# include "../locale/localeinfo.h" +#endif + +#if defined (_MSC_VER) +#include <stdio.h> /* for size_t */ +#endif + +/* On some systems, limits.h sets RE_DUP_MAX to a lower value than + GNU regex allows. Include it before <regex.h>, which correctly + #undefs RE_DUP_MAX and sets it to the right value. */ +#include <limits.h> + +#ifdef GAWK +#undef alloca +#define alloca alloca_is_bad_you_should_never_use_it +#endif +#include <regex.h> +#include "regex_internal.h" + +#include "regex_internal.c" +#ifdef GAWK +#define bool int +#define true (1) +#define false (0) +#endif +#include "regcomp.c" +#include "regexec.c" + +/* Binary backward compatibility. */ +#if _LIBC +# include <shlib-compat.h> +# if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3) +link_warning (re_max_failures, "the 're_max_failures' variable is obsolete and will go away.") +int re_max_failures = 2000; +# endif +#endif diff --git a/compat/regex/regex.h b/compat/regex/regex.h new file mode 100644 index 0000000000..61c9683872 --- /dev/null +++ b/compat/regex/regex.h @@ -0,0 +1,582 @@ +#include <stdio.h> +#include <stddef.h> + +/* Definitions for data structures and routines for the regular + expression library. + Copyright (C) 1985,1989-93,1995-98,2000,2001,2002,2003,2005,2006,2008 + Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301 USA. */ + +#ifndef _REGEX_H +#define _REGEX_H 1 + +#ifdef HAVE_STDDEF_H +#include <stddef.h> +#endif + +#ifdef HAVE_SYS_TYPES_H +#include <sys/types.h> +#endif + +#ifndef _LIBC +#define __USE_GNU 1 +#endif + +/* Allow the use in C++ code. */ +#ifdef __cplusplus +extern "C" { +#endif + +/* The following two types have to be signed and unsigned integer type + wide enough to hold a value of a pointer. For most ANSI compilers + ptrdiff_t and size_t should be likely OK. Still size of these two + types is 2 for Microsoft C. Ugh... */ +typedef long int s_reg_t; +typedef unsigned long int active_reg_t; + +/* The following bits are used to determine the regexp syntax we + recognize. The set/not-set meanings are chosen so that Emacs syntax + remains the value 0. The bits are given in alphabetical order, and + the definitions shifted by one from the previous bit; thus, when we + add or remove a bit, only one other definition need change. */ +typedef unsigned long int reg_syntax_t; + +#ifdef __USE_GNU +/* If this bit is not set, then \ inside a bracket expression is literal. + If set, then such a \ quotes the following character. */ +# define RE_BACKSLASH_ESCAPE_IN_LISTS ((unsigned long int) 1) + +/* If this bit is not set, then + and ? are operators, and \+ and \? are + literals. + If set, then \+ and \? are operators and + and ? are literals. */ +# define RE_BK_PLUS_QM (RE_BACKSLASH_ESCAPE_IN_LISTS << 1) + +/* If this bit is set, then character classes are supported. They are: + [:alpha:], [:upper:], [:lower:], [:digit:], [:alnum:], [:xdigit:], + [:space:], [:print:], [:punct:], [:graph:], and [:cntrl:]. + If not set, then character classes are not supported. */ +# define RE_CHAR_CLASSES (RE_BK_PLUS_QM << 1) + +/* If this bit is set, then ^ and $ are always anchors (outside bracket + expressions, of course). + If this bit is not set, then it depends: + ^ is an anchor if it is at the beginning of a regular + expression or after an open-group or an alternation operator; + $ is an anchor if it is at the end of a regular expression, or + before a close-group or an alternation operator. + + This bit could be (re)combined with RE_CONTEXT_INDEP_OPS, because + POSIX draft 11.2 says that * etc. in leading positions is undefined. + We already implemented a previous draft which made those constructs + invalid, though, so we haven't changed the code back. */ +# define RE_CONTEXT_INDEP_ANCHORS (RE_CHAR_CLASSES << 1) + +/* If this bit is set, then special characters are always special + regardless of where they are in the pattern. + If this bit is not set, then special characters are special only in + some contexts; otherwise they are ordinary. Specifically, + * + ? and intervals are only special when not after the beginning, + open-group, or alternation operator. 
*/ +# define RE_CONTEXT_INDEP_OPS (RE_CONTEXT_INDEP_ANCHORS << 1) + +/* If this bit is set, then *, +, ?, and { cannot be first in an re or + immediately after an alternation or begin-group operator. */ +# define RE_CONTEXT_INVALID_OPS (RE_CONTEXT_INDEP_OPS << 1) + +/* If this bit is set, then . matches newline. + If not set, then it doesn't. */ +# define RE_DOT_NEWLINE (RE_CONTEXT_INVALID_OPS << 1) + +/* If this bit is set, then . doesn't match NUL. + If not set, then it does. */ +# define RE_DOT_NOT_NULL (RE_DOT_NEWLINE << 1) + +/* If this bit is set, nonmatching lists [^...] do not match newline. + If not set, they do. */ +# define RE_HAT_LISTS_NOT_NEWLINE (RE_DOT_NOT_NULL << 1) + +/* If this bit is set, either \{...\} or {...} defines an + interval, depending on RE_NO_BK_BRACES. + If not set, \{, \}, {, and } are literals. */ +# define RE_INTERVALS (RE_HAT_LISTS_NOT_NEWLINE << 1) + +/* If this bit is set, +, ? and | aren't recognized as operators. + If not set, they are. */ +# define RE_LIMITED_OPS (RE_INTERVALS << 1) + +/* If this bit is set, newline is an alternation operator. + If not set, newline is literal. */ +# define RE_NEWLINE_ALT (RE_LIMITED_OPS << 1) + +/* If this bit is set, then `{...}' defines an interval, and \{ and \} + are literals. + If not set, then `\{...\}' defines an interval. */ +# define RE_NO_BK_BRACES (RE_NEWLINE_ALT << 1) + +/* If this bit is set, (...) defines a group, and \( and \) are literals. + If not set, \(...\) defines a group, and ( and ) are literals. */ +# define RE_NO_BK_PARENS (RE_NO_BK_BRACES << 1) + +/* If this bit is set, then \<digit> matches <digit>. + If not set, then \<digit> is a back-reference. */ +# define RE_NO_BK_REFS (RE_NO_BK_PARENS << 1) + +/* If this bit is set, then | is an alternation operator, and \| is literal. + If not set, then \| is an alternation operator, and | is literal. */ +# define RE_NO_BK_VBAR (RE_NO_BK_REFS << 1) + +/* If this bit is set, then an ending range point collating higher + than the starting range point, as in [z-a], is invalid. + If not set, then when ending range point collates higher than the + starting range point, the range is ignored. */ +# define RE_NO_EMPTY_RANGES (RE_NO_BK_VBAR << 1) + +/* If this bit is set, then an unmatched ) is ordinary. + If not set, then an unmatched ) is invalid. */ +# define RE_UNMATCHED_RIGHT_PAREN_ORD (RE_NO_EMPTY_RANGES << 1) + +/* If this bit is set, succeed as soon as we match the whole pattern, + without further backtracking. */ +# define RE_NO_POSIX_BACKTRACKING (RE_UNMATCHED_RIGHT_PAREN_ORD << 1) + +/* If this bit is set, do not process the GNU regex operators. + If not set, then the GNU regex operators are recognized. */ +# define RE_NO_GNU_OPS (RE_NO_POSIX_BACKTRACKING << 1) + +/* If this bit is set, a syntactically invalid interval is treated as + a string of ordinary characters. For example, the ERE 'a{1' is + treated as 'a\{1'. */ +# define RE_INVALID_INTERVAL_ORD (RE_NO_GNU_OPS << 1) + +/* If this bit is set, then ignore case when matching. + If not set, then case is significant. */ +# define RE_ICASE (RE_INVALID_INTERVAL_ORD << 1) + +/* This bit is used internally like RE_CONTEXT_INDEP_ANCHORS but only + for ^, because it is difficult to scan the regex backwards to find + whether ^ should be special. */ +# define RE_CARET_ANCHORS_HERE (RE_ICASE << 1) + +/* If this bit is set, then \{ cannot be first in an bre or + immediately after an alternation or begin-group operator. 
*/ +# define RE_CONTEXT_INVALID_DUP (RE_CARET_ANCHORS_HERE << 1) + +/* If this bit is set, then no_sub will be set to 1 during + re_compile_pattern. */ +#define RE_NO_SUB (RE_CONTEXT_INVALID_DUP << 1) +#endif + +/* This global variable defines the particular regexp syntax to use (for + some interfaces). When a regexp is compiled, the syntax used is + stored in the pattern buffer, so changing this does not affect + already-compiled regexps. */ +extern reg_syntax_t re_syntax_options; + +#ifdef __USE_GNU +/* Define combinations of the above bits for the standard possibilities. + (The [[[ comments delimit what gets put into the Texinfo file, so + don't delete them!) */ +/* [[[begin syntaxes]]] */ +#define RE_SYNTAX_EMACS 0 + +#define RE_SYNTAX_AWK \ + (RE_BACKSLASH_ESCAPE_IN_LISTS | RE_DOT_NOT_NULL \ + | RE_NO_BK_PARENS | RE_NO_BK_REFS \ + | RE_NO_BK_VBAR | RE_NO_EMPTY_RANGES \ + | RE_DOT_NEWLINE | RE_CONTEXT_INDEP_ANCHORS \ + | RE_UNMATCHED_RIGHT_PAREN_ORD | RE_NO_GNU_OPS) + +#define RE_SYNTAX_GNU_AWK \ + ((RE_SYNTAX_POSIX_EXTENDED | RE_BACKSLASH_ESCAPE_IN_LISTS \ + | RE_INVALID_INTERVAL_ORD) \ + & ~(RE_DOT_NOT_NULL | RE_CONTEXT_INDEP_OPS \ + | RE_CONTEXT_INVALID_OPS )) + +#define RE_SYNTAX_POSIX_AWK \ + (RE_SYNTAX_POSIX_EXTENDED | RE_BACKSLASH_ESCAPE_IN_LISTS \ + | RE_INTERVALS | RE_NO_GNU_OPS \ + | RE_INVALID_INTERVAL_ORD) + +#define RE_SYNTAX_GREP \ + (RE_BK_PLUS_QM | RE_CHAR_CLASSES \ + | RE_HAT_LISTS_NOT_NEWLINE | RE_INTERVALS \ + | RE_NEWLINE_ALT) + +#define RE_SYNTAX_EGREP \ + (RE_CHAR_CLASSES | RE_CONTEXT_INDEP_ANCHORS \ + | RE_CONTEXT_INDEP_OPS | RE_HAT_LISTS_NOT_NEWLINE \ + | RE_NEWLINE_ALT | RE_NO_BK_PARENS \ + | RE_NO_BK_VBAR) + +#define RE_SYNTAX_POSIX_EGREP \ + (RE_SYNTAX_EGREP | RE_INTERVALS | RE_NO_BK_BRACES \ + | RE_INVALID_INTERVAL_ORD) + +/* P1003.2/D11.2, section 4.20.7.1, lines 5078ff. */ +#define RE_SYNTAX_ED RE_SYNTAX_POSIX_BASIC + +#define RE_SYNTAX_SED RE_SYNTAX_POSIX_BASIC + +/* Syntax bits common to both basic and extended POSIX regex syntax. */ +#define _RE_SYNTAX_POSIX_COMMON \ + (RE_CHAR_CLASSES | RE_DOT_NEWLINE | RE_DOT_NOT_NULL \ + | RE_INTERVALS | RE_NO_EMPTY_RANGES) + +#define RE_SYNTAX_POSIX_BASIC \ + (_RE_SYNTAX_POSIX_COMMON | RE_BK_PLUS_QM | RE_CONTEXT_INVALID_DUP) + +/* Differs from ..._POSIX_BASIC only in that RE_BK_PLUS_QM becomes + RE_LIMITED_OPS, i.e., \? \+ \| are not recognized. Actually, this + isn't minimal, since other operators, such as \`, aren't disabled. */ +#define RE_SYNTAX_POSIX_MINIMAL_BASIC \ + (_RE_SYNTAX_POSIX_COMMON | RE_LIMITED_OPS) + +#define RE_SYNTAX_POSIX_EXTENDED \ + (_RE_SYNTAX_POSIX_COMMON | RE_CONTEXT_INDEP_ANCHORS \ + | RE_CONTEXT_INDEP_OPS | RE_NO_BK_BRACES \ + | RE_NO_BK_PARENS | RE_NO_BK_VBAR \ + | RE_CONTEXT_INVALID_OPS | RE_UNMATCHED_RIGHT_PAREN_ORD) + +/* Differs from ..._POSIX_EXTENDED in that RE_CONTEXT_INDEP_OPS is + removed and RE_NO_BK_REFS is added. */ +#define RE_SYNTAX_POSIX_MINIMAL_EXTENDED \ + (_RE_SYNTAX_POSIX_COMMON | RE_CONTEXT_INDEP_ANCHORS \ + | RE_CONTEXT_INVALID_OPS | RE_NO_BK_BRACES \ + | RE_NO_BK_PARENS | RE_NO_BK_REFS \ + | RE_NO_BK_VBAR | RE_UNMATCHED_RIGHT_PAREN_ORD) +/* [[[end syntaxes]]] */ + +/* Maximum number of duplicates an interval can allow. Some systems + (erroneously) define this in other header files, but we want our + value, so remove any previous define. */ +# ifdef RE_DUP_MAX +# undef RE_DUP_MAX +# endif +/* If sizeof(int) == 2, then ((1 << 15) - 1) overflows. */ +# define RE_DUP_MAX (0x7fff) +#endif + + +/* POSIX `cflags' bits (i.e., information for `regcomp'). 
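One of the cflags defined next is easy to check empirically: REG_NEWLINE makes the anchors match around embedded newlines, which plain REG_EXTENDED does not. A short demonstration through the POSIX entry points (illustrative):

#include <regex.h>
#include <stdio.h>

int main(void)
{
    regex_t re;
    int rc;

    if (regcomp(&re, "^b", REG_EXTENDED | REG_NEWLINE) != 0)
        return 1;
    rc = regexec(&re, "a\nb", 0, NULL, 0);
    printf("with REG_NEWLINE:    %s\n", rc == 0 ? "match" : "no match");
    regfree(&re);

    if (regcomp(&re, "^b", REG_EXTENDED) != 0)
        return 1;
    rc = regexec(&re, "a\nb", 0, NULL, 0);
    printf("without REG_NEWLINE: %s\n", rc == 0 ? "match" : "no match");
    regfree(&re);
    return 0;
}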
*/
+
+/* If this bit is set, then use extended regular expression syntax.
+ If not set, then use basic regular expression syntax. */
+#define REG_EXTENDED 1
+
+/* If this bit is set, then ignore case when matching.
+ If not set, then case is significant. */
+#define REG_ICASE (REG_EXTENDED << 1)
+
+/* If this bit is set, then anchors do not match at newline
+ characters in the string.
+ If not set, then anchors do match at newlines. */
+#define REG_NEWLINE (REG_ICASE << 1)
+
+/* If this bit is set, then report only success or fail in regexec.
+ If not set, then returns differ between not matching and errors. */
+#define REG_NOSUB (REG_NEWLINE << 1)
+
+
+/* POSIX `eflags' bits (i.e., information for regexec). */
+
+/* If this bit is set, then the beginning-of-line operator doesn't match
+ the beginning of the string (presumably because it's not the
+ beginning of a line).
+ If not set, then the beginning-of-line operator does match the
+ beginning of the string. */
+#define REG_NOTBOL 1
+
+/* Like REG_NOTBOL, except for the end-of-line. */
+#define REG_NOTEOL (1 << 1)
+
+/* Use PMATCH[0] to delimit the start and end of the search in the
+ buffer. */
+#define REG_STARTEND (1 << 2)
+
+
+/* If any error codes are removed, changed, or added, update the
+ `re_error_msg' table in regex.c. */
+typedef enum
+{
+#if defined _XOPEN_SOURCE || defined __USE_XOPEN2K
+ REG_ENOSYS = -1, /* This will never happen for this implementation. */
+#endif
+
+ REG_NOERROR = 0, /* Success. */
+ REG_NOMATCH, /* Didn't find a match (for regexec). */
+
+ /* POSIX regcomp return error codes. (In the order listed in the
+ standard.) */
+ REG_BADPAT, /* Invalid pattern. */
+ REG_ECOLLATE, /* Invalid collating element. */
+ REG_ECTYPE, /* Invalid character class name. */
+ REG_EESCAPE, /* Trailing backslash. */
+ REG_ESUBREG, /* Invalid back reference. */
+ REG_EBRACK, /* Unmatched left bracket. */
+ REG_EPAREN, /* Parenthesis imbalance. */
+ REG_EBRACE, /* Unmatched \{. */
+ REG_BADBR, /* Invalid contents of \{\}. */
+ REG_ERANGE, /* Invalid range end. */
+ REG_ESPACE, /* Ran out of memory. */
+ REG_BADRPT, /* No preceding re for repetition op. */
+
+ /* Error codes we've added. */
+ REG_EEND, /* Premature end. */
+ REG_ESIZE, /* Compiled pattern bigger than 2^16 bytes. */
+ REG_ERPAREN /* Unmatched ) or \); not returned from regcomp. */
+} reg_errcode_t;
+
+/* This data structure represents a compiled pattern. Before calling
+ the pattern compiler, the fields `buffer', `allocated', `fastmap',
+ `translate', and `no_sub' can be set. After the pattern has been
+ compiled, the `re_nsub' field is available. All other fields are
+ private to the regex routines. */
+
+#ifndef RE_TRANSLATE_TYPE
+# define __RE_TRANSLATE_TYPE unsigned char *
+# ifdef __USE_GNU
+# define RE_TRANSLATE_TYPE __RE_TRANSLATE_TYPE
+# endif
+#endif
+
+#ifdef __USE_GNU
+# define __REPB_PREFIX(name) name
+#else
+# define __REPB_PREFIX(name) __##name
+#endif
+
+struct re_pattern_buffer
+{
+ /* Space that holds the compiled pattern. It is declared as
+ `unsigned char *' because its elements are sometimes used as
+ array indexes. */
+ unsigned char *__REPB_PREFIX(buffer);
+
+ /* Number of bytes to which `buffer' points. */
+ unsigned long int __REPB_PREFIX(allocated);
+
+ /* Number of bytes actually used in `buffer'. */
+ unsigned long int __REPB_PREFIX(used);
+
+ /* Syntax setting with which the pattern was compiled. */
+ reg_syntax_t __REPB_PREFIX(syntax);
+
+ /* Pointer to a fastmap, if any, otherwise zero.
re_search uses the + fastmap, if there is one, to skip over impossible starting points + for matches. */ + char *__REPB_PREFIX(fastmap); + + /* Either a translate table to apply to all characters before + comparing them, or zero for no translation. The translation is + applied to a pattern when it is compiled and to a string when it + is matched. */ + __RE_TRANSLATE_TYPE __REPB_PREFIX(translate); + + /* Number of subexpressions found by the compiler. */ + size_t re_nsub; + + /* Zero if this pattern cannot match the empty string, one else. + Well, in truth it's used only in `re_search_2', to see whether or + not we should use the fastmap, so we don't set this absolutely + perfectly; see `re_compile_fastmap' (the `duplicate' case). */ + unsigned __REPB_PREFIX(can_be_null) : 1; + + /* If REGS_UNALLOCATED, allocate space in the `regs' structure + for `max (RE_NREGS, re_nsub + 1)' groups. + If REGS_REALLOCATE, reallocate space if necessary. + If REGS_FIXED, use what's there. */ +#ifdef __USE_GNU +# define REGS_UNALLOCATED 0 +# define REGS_REALLOCATE 1 +# define REGS_FIXED 2 +#endif + unsigned __REPB_PREFIX(regs_allocated) : 2; + + /* Set to zero when `regex_compile' compiles a pattern; set to one + by `re_compile_fastmap' if it updates the fastmap. */ + unsigned __REPB_PREFIX(fastmap_accurate) : 1; + + /* If set, `re_match_2' does not return information about + subexpressions. */ + unsigned __REPB_PREFIX(no_sub) : 1; + + /* If set, a beginning-of-line anchor doesn't match at the beginning + of the string. */ + unsigned __REPB_PREFIX(not_bol) : 1; + + /* Similarly for an end-of-line anchor. */ + unsigned __REPB_PREFIX(not_eol) : 1; + + /* If true, an anchor at a newline matches. */ + unsigned __REPB_PREFIX(newline_anchor) : 1; +}; + +typedef struct re_pattern_buffer regex_t; + +/* Type for byte offsets within the string. POSIX mandates this. */ +typedef int regoff_t; + + +#ifdef __USE_GNU +/* This is the structure we store register match data in. See + regex.texinfo for a full description of what registers match. */ +struct re_registers +{ + unsigned num_regs; + regoff_t *start; + regoff_t *end; +}; + + +/* If `regs_allocated' is REGS_UNALLOCATED in the pattern buffer, + `re_match_2' returns information about at least this many registers + the first time a `regs' structure is passed. */ +# ifndef RE_NREGS +# define RE_NREGS 30 +# endif +#endif + + +/* POSIX specification for registers. Aside from the different names than + `re_registers', POSIX uses an array of structures, instead of a + structure of arrays. */ +typedef struct +{ + regoff_t rm_so; /* Byte offset from string's start to substring's start. */ + regoff_t rm_eo; /* Byte offset from string's start to substring's end. */ +} regmatch_t; + +/* Declarations for routines. */ + +#ifdef __USE_GNU +/* Sets the current default syntax to SYNTAX, and return the old syntax. + You can also simply assign to the `re_syntax_options' variable. */ +extern reg_syntax_t re_set_syntax (reg_syntax_t __syntax); + +/* Compile the regular expression PATTERN, with length LENGTH + and syntax given by the global `re_syntax_options', into the buffer + BUFFER. Return NULL if successful, and an error string if not. */ +extern const char *re_compile_pattern (const char *__pattern, size_t __length, + struct re_pattern_buffer *__buffer); + + +/* Compile a fastmap for the compiled pattern in BUFFER; used to + accelerate searches. Return 0 if successful and -2 if was an + internal error. 
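A sketch of how the GNU-level pieces above fit together: set re_syntax_options, compile with re_compile_pattern, then scan with re_search (declared just below), letting REGS_UNALLOCATED tell the library to allocate the register arrays itself. Illustrative only, with minimal error handling:

#include <regex.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    struct re_pattern_buffer buf;
    struct re_registers regs;
    const char *text = "xxabcyy";
    const char *err;
    int pos;

    re_syntax_options = RE_SYNTAX_POSIX_EXTENDED;
    memset(&buf, 0, sizeof buf);
    memset(&regs, 0, sizeof regs);
    buf.regs_allocated = REGS_UNALLOCATED;  /* library mallocs regs */

    err = re_compile_pattern("a(b)c", 5, &buf);
    if (err != NULL)
    {
        fprintf(stderr, "compile: %s\n", err);
        return 1;
    }

    pos = re_search(&buf, text, (int) strlen(text),
                    0, (int) strlen(text), &regs);
    if (pos >= 0)  /* -1 means no match, -2 an internal error */
        printf("match at %d, group 1 spans [%d,%d)\n",
               pos, regs.start[1], regs.end[1]);

    free(regs.start);
    free(regs.end);
    regfree(&buf);  /* regex_t and re_pattern_buffer are one type */
    return 0;
}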
*/ +extern int re_compile_fastmap (struct re_pattern_buffer *__buffer); + + +/* Search in the string STRING (with length LENGTH) for the pattern + compiled into BUFFER. Start searching at position START, for RANGE + characters. Return the starting position of the match, -1 for no + match, or -2 for an internal error. Also return register + information in REGS (if REGS and BUFFER->no_sub are nonzero). */ +extern int re_search (struct re_pattern_buffer *__buffer, const char *__cstring, + int __length, int __start, int __range, + struct re_registers *__regs); + + +/* Like `re_search', but search in the concatenation of STRING1 and + STRING2. Also, stop searching at index START + STOP. */ +extern int re_search_2 (struct re_pattern_buffer *__buffer, + const char *__string1, int __length1, + const char *__string2, int __length2, int __start, + int __range, struct re_registers *__regs, int __stop); + + +/* Like `re_search', but return how many characters in STRING the regexp + in BUFFER matched, starting at position START. */ +extern int re_match (struct re_pattern_buffer *__buffer, const char *__cstring, + int __length, int __start, struct re_registers *__regs); + + +/* Relates to `re_match' as `re_search_2' relates to `re_search'. */ +extern int re_match_2 (struct re_pattern_buffer *__buffer, + const char *__string1, int __length1, + const char *__string2, int __length2, int __start, + struct re_registers *__regs, int __stop); + + +/* Set REGS to hold NUM_REGS registers, storing them in STARTS and + ENDS. Subsequent matches using BUFFER and REGS will use this memory + for recording register information. STARTS and ENDS must be + allocated with malloc, and must each be at least `NUM_REGS * sizeof + (regoff_t)' bytes long. + + If NUM_REGS == 0, then subsequent matches should allocate their own + register data. + + Unless this function is called, the first search or match using + PATTERN_BUFFER will allocate its own register data, without + freeing the old data. */ +extern void re_set_registers (struct re_pattern_buffer *__buffer, + struct re_registers *__regs, + unsigned int __num_regs, + regoff_t *__starts, regoff_t *__ends); +#endif /* Use GNU */ + +#if defined _REGEX_RE_COMP || (defined _LIBC && defined __USE_BSD) +# ifndef _CRAY +/* 4.2 bsd compatibility. */ +extern char *re_comp (const char *); +extern int re_exec (const char *); +# endif +#endif + +/* GCC 2.95 and later have "__restrict"; C99 compilers have + "restrict", and "configure" may have defined "restrict". */ +#ifndef __restrict +# if ! (2 < __GNUC__ || (2 == __GNUC__ && 95 <= __GNUC_MINOR__)) +# if defined restrict || 199901L <= __STDC_VERSION__ +# define __restrict restrict +# else +# define __restrict +# endif +# endif +#endif +/* gcc 3.1 and up support the [restrict] syntax. */ +#ifndef __restrict_arr +# if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) \ + && !defined __GNUG__ +# define __restrict_arr __restrict +# else +# define __restrict_arr +# endif +#endif + +/* POSIX compatibility. 
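The canonical calling sequence for the four POSIX entry points declared below: compile, execute with a regmatch_t array, report failures through regerror, then free. A minimal self-contained sketch:

#include <regex.h>
#include <stdio.h>

int main(void)
{
    regex_t re;
    regmatch_t m[2];
    char errbuf[128];
    int rc;

    rc = regcomp(&re, "([0-9]+) apples", REG_EXTENDED);
    if (rc != 0)
    {
        regerror(rc, &re, errbuf, sizeof errbuf);
        fprintf(stderr, "regcomp: %s\n", errbuf);
        return 1;
    }

    rc = regexec(&re, "I have 12 apples", 2, m, 0);
    if (rc == 0)
        printf("count spans bytes [%d,%d)\n",
               (int) m[1].rm_so, (int) m[1].rm_eo);
    else if (rc == REG_NOMATCH)
        printf("no match\n");

    regfree(&re);
    return 0;
}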
*/
+extern int regcomp (regex_t *__restrict __preg,
+ const char *__restrict __pattern,
+ int __cflags);
+
+extern int regexec (const regex_t *__restrict __preg,
+ const char *__restrict __cstring, size_t __nmatch,
+ regmatch_t __pmatch[__restrict_arr],
+ int __eflags);
+
+extern size_t regerror (int __errcode, const regex_t *__restrict __preg,
+ char *__restrict __errbuf, size_t __errbuf_size);
+
+extern void regfree (regex_t *__preg);
+
+
+#ifdef __cplusplus
+}
+#endif /* C++ */
+
+#endif /* regex.h */
diff --git a/compat/regex/regex_internal.c b/compat/regex/regex_internal.c
new file mode 100644
index 0000000000..d4121f2f4f
--- /dev/null
+++ b/compat/regex/regex_internal.c
@@ -0,0 +1,1744 @@
+/* Extended regular expression matching and search library.
+ Copyright (C) 2002-2006, 2010 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Isamu Hasegawa <isamu@yamato.ibm.com>.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301 USA. */
+
+static void re_string_construct_common (const char *str, int len,
+ re_string_t *pstr,
+ RE_TRANSLATE_TYPE trans, int icase,
+ const re_dfa_t *dfa) internal_function;
+static re_dfastate_t *create_ci_newstate (const re_dfa_t *dfa,
+ const re_node_set *nodes,
+ unsigned int hash) internal_function;
+static re_dfastate_t *create_cd_newstate (const re_dfa_t *dfa,
+ const re_node_set *nodes,
+ unsigned int context,
+ unsigned int hash) internal_function;
+
+#ifdef GAWK
+#undef MAX /* safety */
+static int
+MAX(size_t a, size_t b)
+{
+ return (a > b ? a : b);
+}
+#endif
+
+/* Functions for string operation. */
+
+/* This function allocates the buffers. It is necessary to call
+ re_string_reconstruct before using the object. */
+
+static reg_errcode_t
+internal_function
+re_string_allocate (re_string_t *pstr, const char *str, int len, int init_len,
+ RE_TRANSLATE_TYPE trans, int icase, const re_dfa_t *dfa)
+{
+ reg_errcode_t ret;
+ int init_buf_len;
+
+ /* Ensure at least one character fits into the buffers. */
+ if (init_len < dfa->mb_cur_max)
+ init_len = dfa->mb_cur_max;
+ init_buf_len = (len + 1 < init_len) ? len + 1: init_len;
+ re_string_construct_common (str, len, pstr, trans, icase, dfa);
+
+ ret = re_string_realloc_buffers (pstr, init_buf_len);
+ if (BE (ret != REG_NOERROR, 0))
+ return ret;
+
+ pstr->word_char = dfa->word_char;
+ pstr->word_ops_used = dfa->word_ops_used;
+ pstr->mbs = pstr->mbs_allocated ? pstr->mbs : (unsigned char *) str;
+ pstr->valid_len = (pstr->mbs_allocated || dfa->mb_cur_max > 1) ? 0 : len;
+ pstr->valid_raw_len = pstr->valid_len;
+ return REG_NOERROR;
+}
+
+/* This function allocates the buffers, and initializes them.
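re_string_construct, defined next, copies the input bytes only when it must (a translate table or case folding was requested); otherwise pstr->mbs simply aliases the caller's string, as the mbs_allocated test in re_string_allocate above already shows. The same alias-or-copy idea in isolation; struct view and its members are illustrative, not from the source:

#include <stdlib.h>
#include <string.h>

struct view { const unsigned char *bytes; unsigned char *owned; };

/* Borrow S directly when no transformation is needed; otherwise
   take a private, mutable copy.  Returns -1 on allocation failure. */
static int view_init(struct view *v, const char *s, size_t n, int must_copy)
{
    v->owned = NULL;
    if (!must_copy)
    {
        v->bytes = (const unsigned char *) s;   /* zero-copy alias */
        return 0;
    }
    v->owned = malloc(n);
    if (v->owned == NULL)
        return -1;
    memcpy(v->owned, s, n);
    v->bytes = v->owned;
    return 0;
}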
*/ + +static reg_errcode_t +internal_function +re_string_construct (re_string_t *pstr, const char *str, int len, + RE_TRANSLATE_TYPE trans, int icase, const re_dfa_t *dfa) +{ + reg_errcode_t ret; + memset (pstr, '\0', sizeof (re_string_t)); + re_string_construct_common (str, len, pstr, trans, icase, dfa); + + if (len > 0) + { + ret = re_string_realloc_buffers (pstr, len + 1); + if (BE (ret != REG_NOERROR, 0)) + return ret; + } + pstr->mbs = pstr->mbs_allocated ? pstr->mbs : (unsigned char *) str; + + if (icase) + { +#ifdef RE_ENABLE_I18N + if (dfa->mb_cur_max > 1) + { + while (1) + { + ret = build_wcs_upper_buffer (pstr); + if (BE (ret != REG_NOERROR, 0)) + return ret; + if (pstr->valid_raw_len >= len) + break; + if (pstr->bufs_len > pstr->valid_len + dfa->mb_cur_max) + break; + ret = re_string_realloc_buffers (pstr, pstr->bufs_len * 2); + if (BE (ret != REG_NOERROR, 0)) + return ret; + } + } + else +#endif /* RE_ENABLE_I18N */ + build_upper_buffer (pstr); + } + else + { +#ifdef RE_ENABLE_I18N + if (dfa->mb_cur_max > 1) + build_wcs_buffer (pstr); + else +#endif /* RE_ENABLE_I18N */ + { + if (trans != NULL) + re_string_translate_buffer (pstr); + else + { + pstr->valid_len = pstr->bufs_len; + pstr->valid_raw_len = pstr->bufs_len; + } + } + } + + return REG_NOERROR; +} + +/* Helper functions for re_string_allocate, and re_string_construct. */ + +static reg_errcode_t +internal_function +re_string_realloc_buffers (re_string_t *pstr, int new_buf_len) +{ +#ifdef RE_ENABLE_I18N + if (pstr->mb_cur_max > 1) + { + wint_t *new_wcs; + + /* Avoid overflow in realloc. */ + const size_t max_object_size = MAX (sizeof (wint_t), sizeof (int)); + if (BE (SIZE_MAX / max_object_size < new_buf_len, 0)) + return REG_ESPACE; + + new_wcs = re_realloc (pstr->wcs, wint_t, new_buf_len); + if (BE (new_wcs == NULL, 0)) + return REG_ESPACE; + pstr->wcs = new_wcs; + if (pstr->offsets != NULL) + { + int *new_offsets = re_realloc (pstr->offsets, int, new_buf_len); + if (BE (new_offsets == NULL, 0)) + return REG_ESPACE; + pstr->offsets = new_offsets; + } + } +#endif /* RE_ENABLE_I18N */ + if (pstr->mbs_allocated) + { + unsigned char *new_mbs = re_realloc (pstr->mbs, unsigned char, + new_buf_len); + if (BE (new_mbs == NULL, 0)) + return REG_ESPACE; + pstr->mbs = new_mbs; + } + pstr->bufs_len = new_buf_len; + return REG_NOERROR; +} + + +static void +internal_function +re_string_construct_common (const char *str, int len, re_string_t *pstr, + RE_TRANSLATE_TYPE trans, int icase, + const re_dfa_t *dfa) +{ + pstr->raw_mbs = (const unsigned char *) str; + pstr->len = len; + pstr->raw_len = len; + pstr->trans = trans; + pstr->icase = icase ? 1 : 0; + pstr->mbs_allocated = (trans != NULL || icase); + pstr->mb_cur_max = dfa->mb_cur_max; + pstr->is_utf8 = dfa->is_utf8; + pstr->map_notascii = dfa->map_notascii; + pstr->stop = pstr->len; + pstr->raw_stop = pstr->stop; +} + +#ifdef RE_ENABLE_I18N + +/* Build wide character buffer PSTR->WCS. + If the byte sequence of the string are: + <mb1>(0), <mb1>(1), <mb2>(0), <mb2>(1), <sb3> + Then wide character buffer will be: + <wc1> , WEOF , <wc2> , WEOF , <wc3> + We use WEOF for padding, they indicate that the position isn't + a first byte of a multibyte character. + + Note that this function assumes PSTR->VALID_LEN elements are already + built and starts from PSTR->VALID_LEN. 
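The wide-character buffer layout described above, one wchar_t slot per input byte with WEOF marking the continuation bytes of a multibyte character, can be reproduced outside the library with plain mbrtowc. A stand-alone illustration; it assumes a UTF-8 locale is available on the system:

#include <locale.h>
#include <stdio.h>
#include <string.h>
#include <wchar.h>

int main(void)
{
    const char *s = "a\xc3\xa9" "b";   /* "aeb" with an e-acute, as UTF-8 */
    size_t len = strlen(s), i = 0, k;
    wint_t wcs[8];
    mbstate_t st;

    if (setlocale(LC_CTYPE, "C.UTF-8") == NULL
        && setlocale(LC_CTYPE, "en_US.UTF-8") == NULL)
        return 1;                      /* no UTF-8 locale available */

    memset(&st, 0, sizeof st);
    while (i < len)
    {
        wchar_t wc;
        size_t n = mbrtowc(&wc, s + i, len - i, &st);
        if (n == 0 || n == (size_t) -1 || n == (size_t) -2)
            break;                     /* NUL, invalid, or incomplete */
        wcs[i] = (wint_t) wc;          /* first byte carries the char */
        for (k = 1; k < n; ++k)
            wcs[i + k] = WEOF;         /* continuation bytes get WEOF */
        i += n;
    }
    for (k = 0; k < i; ++k)
        printf("wcs[%zu] = %s\n", k, wcs[k] == WEOF ? "WEOF" : "character");
    return 0;
}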
*/ + +static void +internal_function +build_wcs_buffer (re_string_t *pstr) +{ +#ifdef _LIBC + unsigned char buf[MB_LEN_MAX]; + assert (MB_LEN_MAX >= pstr->mb_cur_max); +#else + unsigned char buf[64]; +#endif + mbstate_t prev_st; + int byte_idx, end_idx, remain_len; + size_t mbclen; + + /* Build the buffers from pstr->valid_len to either pstr->len or + pstr->bufs_len. */ + end_idx = (pstr->bufs_len > pstr->len) ? pstr->len : pstr->bufs_len; + for (byte_idx = pstr->valid_len; byte_idx < end_idx;) + { + wchar_t wc; + const char *p; + + remain_len = end_idx - byte_idx; + prev_st = pstr->cur_state; + /* Apply the translation if we need. */ + if (BE (pstr->trans != NULL, 0)) + { + int i, ch; + + for (i = 0; i < pstr->mb_cur_max && i < remain_len; ++i) + { + ch = pstr->raw_mbs [pstr->raw_mbs_idx + byte_idx + i]; + buf[i] = pstr->mbs[byte_idx + i] = pstr->trans[ch]; + } + p = (const char *) buf; + } + else + p = (const char *) pstr->raw_mbs + pstr->raw_mbs_idx + byte_idx; + mbclen = __mbrtowc (&wc, p, remain_len, &pstr->cur_state); + if (BE (mbclen == (size_t) -2, 0)) + { + /* The buffer doesn't have enough space, finish to build. */ + pstr->cur_state = prev_st; + break; + } + else if (BE (mbclen == (size_t) -1 || mbclen == 0, 0)) + { + /* We treat these cases as a singlebyte character. */ + mbclen = 1; + wc = (wchar_t) pstr->raw_mbs[pstr->raw_mbs_idx + byte_idx]; + if (BE (pstr->trans != NULL, 0)) + wc = pstr->trans[wc]; + pstr->cur_state = prev_st; + } + + /* Write wide character and padding. */ + pstr->wcs[byte_idx++] = wc; + /* Write paddings. */ + for (remain_len = byte_idx + mbclen - 1; byte_idx < remain_len ;) + pstr->wcs[byte_idx++] = WEOF; + } + pstr->valid_len = byte_idx; + pstr->valid_raw_len = byte_idx; +} + +/* Build wide character buffer PSTR->WCS like build_wcs_buffer, + but for REG_ICASE. */ + +static reg_errcode_t +internal_function +build_wcs_upper_buffer (re_string_t *pstr) +{ + mbstate_t prev_st; + int src_idx, byte_idx, end_idx, remain_len; + size_t mbclen; +#ifdef _LIBC + char buf[MB_LEN_MAX]; + assert (MB_LEN_MAX >= pstr->mb_cur_max); +#else + char buf[64]; +#endif + + byte_idx = pstr->valid_len; + end_idx = (pstr->bufs_len > pstr->len) ? pstr->len : pstr->bufs_len; + + /* The following optimization assumes that ASCII characters can be + mapped to wide characters with a simple cast. */ + if (! pstr->map_notascii && pstr->trans == NULL && !pstr->offsets_needed) + { + while (byte_idx < end_idx) + { + wchar_t wc; + + if (isascii (pstr->raw_mbs[pstr->raw_mbs_idx + byte_idx]) + && mbsinit (&pstr->cur_state)) + { + /* In case of a singlebyte character. */ + pstr->mbs[byte_idx] + = toupper (pstr->raw_mbs[pstr->raw_mbs_idx + byte_idx]); + /* The next step uses the assumption that wchar_t is encoded + ASCII-safe: all ASCII values can be converted like this. */ + pstr->wcs[byte_idx] = (wchar_t) pstr->mbs[byte_idx]; + ++byte_idx; + continue; + } + + remain_len = end_idx - byte_idx; + prev_st = pstr->cur_state; + mbclen = __mbrtowc (&wc, + ((const char *) pstr->raw_mbs + pstr->raw_mbs_idx + + byte_idx), remain_len, &pstr->cur_state); + if (BE (mbclen + 2 > 2, 1)) + { + wchar_t wcu = wc; + if (iswlower (wc)) + { + size_t mbcdlen; + + wcu = towupper (wc); + mbcdlen = wcrtomb (buf, wcu, &prev_st); + if (BE (mbclen == mbcdlen, 1)) + memcpy (pstr->mbs + byte_idx, buf, mbclen); + else + { + src_idx = byte_idx; + goto offsets_needed; + } + } + else + memcpy (pstr->mbs + byte_idx, + pstr->raw_mbs + pstr->raw_mbs_idx + byte_idx, mbclen); + pstr->wcs[byte_idx++] = wcu; + /* Write paddings. 
*/ + for (remain_len = byte_idx + mbclen - 1; byte_idx < remain_len ;) + pstr->wcs[byte_idx++] = WEOF; + } + else if (mbclen == (size_t) -1 || mbclen == 0) + { + /* It is an invalid character or '\0'. Just use the byte. */ + int ch = pstr->raw_mbs[pstr->raw_mbs_idx + byte_idx]; + pstr->mbs[byte_idx] = ch; + /* And also cast it to wide char. */ + pstr->wcs[byte_idx++] = (wchar_t) ch; + if (BE (mbclen == (size_t) -1, 0)) + pstr->cur_state = prev_st; + } + else + { + /* The buffer doesn't have enough space, finish to build. */ + pstr->cur_state = prev_st; + break; + } + } + pstr->valid_len = byte_idx; + pstr->valid_raw_len = byte_idx; + return REG_NOERROR; + } + else + for (src_idx = pstr->valid_raw_len; byte_idx < end_idx;) + { + wchar_t wc; + const char *p; + offsets_needed: + remain_len = end_idx - byte_idx; + prev_st = pstr->cur_state; + if (BE (pstr->trans != NULL, 0)) + { + int i, ch; + + for (i = 0; i < pstr->mb_cur_max && i < remain_len; ++i) + { + ch = pstr->raw_mbs [pstr->raw_mbs_idx + src_idx + i]; + buf[i] = pstr->trans[ch]; + } + p = (const char *) buf; + } + else + p = (const char *) pstr->raw_mbs + pstr->raw_mbs_idx + src_idx; + mbclen = __mbrtowc (&wc, p, remain_len, &pstr->cur_state); + if (BE (mbclen + 2 > 2, 1)) + { + wchar_t wcu = wc; + if (iswlower (wc)) + { + size_t mbcdlen; + + wcu = towupper (wc); + mbcdlen = wcrtomb ((char *) buf, wcu, &prev_st); + if (BE (mbclen == mbcdlen, 1)) + memcpy (pstr->mbs + byte_idx, buf, mbclen); + else if (mbcdlen != (size_t) -1) + { + size_t i; + + if (byte_idx + mbcdlen > pstr->bufs_len) + { + pstr->cur_state = prev_st; + break; + } + + if (pstr->offsets == NULL) + { + pstr->offsets = re_malloc (int, pstr->bufs_len); + + if (pstr->offsets == NULL) + return REG_ESPACE; + } + if (!pstr->offsets_needed) + { + for (i = 0; i < (size_t) byte_idx; ++i) + pstr->offsets[i] = i; + pstr->offsets_needed = 1; + } + + memcpy (pstr->mbs + byte_idx, buf, mbcdlen); + pstr->wcs[byte_idx] = wcu; + pstr->offsets[byte_idx] = src_idx; + for (i = 1; i < mbcdlen; ++i) + { + pstr->offsets[byte_idx + i] + = src_idx + (i < mbclen ? i : mbclen - 1); + pstr->wcs[byte_idx + i] = WEOF; + } + pstr->len += mbcdlen - mbclen; + if (pstr->raw_stop > src_idx) + pstr->stop += mbcdlen - mbclen; + end_idx = (pstr->bufs_len > pstr->len) + ? pstr->len : pstr->bufs_len; + byte_idx += mbcdlen; + src_idx += mbclen; + continue; + } + else + memcpy (pstr->mbs + byte_idx, p, mbclen); + } + else + memcpy (pstr->mbs + byte_idx, p, mbclen); + + if (BE (pstr->offsets_needed != 0, 0)) + { + size_t i; + for (i = 0; i < mbclen; ++i) + pstr->offsets[byte_idx + i] = src_idx + i; + } + src_idx += mbclen; + + pstr->wcs[byte_idx++] = wcu; + /* Write paddings. */ + for (remain_len = byte_idx + mbclen - 1; byte_idx < remain_len ;) + pstr->wcs[byte_idx++] = WEOF; + } + else if (mbclen == (size_t) -1 || mbclen == 0) + { + /* It is an invalid character or '\0'. Just use the byte. */ + int ch = pstr->raw_mbs[pstr->raw_mbs_idx + src_idx]; + + if (BE (pstr->trans != NULL, 0)) + ch = pstr->trans [ch]; + pstr->mbs[byte_idx] = ch; + + if (BE (pstr->offsets_needed != 0, 0)) + pstr->offsets[byte_idx] = src_idx; + ++src_idx; + + /* And also cast it to wide char. */ + pstr->wcs[byte_idx++] = (wchar_t) ch; + if (BE (mbclen == (size_t) -1, 0)) + pstr->cur_state = prev_st; + } + else + { + /* The buffer doesn't have enough space, finish to build. 
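The offsets machinery above exists because case conversion can change a character's encoded length. A standalone sketch of the mbclen/mbcdlen comparison that triggers it, assuming a UTF-8 locale whose towupper maps U+0131 to plain 'I' (true for common glibc locales, but locale-dependent):

#include <limits.h>
#include <locale.h>
#include <stdio.h>
#include <string.h>
#include <wchar.h>
#include <wctype.h>

int
main (void)
{
  /* U+0131 LATIN SMALL LETTER DOTLESS I: two bytes in UTF-8, but its
     upper case 'I' is a single byte.  */
  const char dotless_i[] = "\xc4\xb1";
  char buf[MB_LEN_MAX];
  mbstate_t st;
  wchar_t wc;
  size_t mbclen, mbcdlen;

  if (!setlocale (LC_CTYPE, "C.UTF-8")
      && !setlocale (LC_CTYPE, "en_US.UTF-8"))
    return 1;
  memset (&st, '\0', sizeof st);
  mbclen = mbrtowc (&wc, dotless_i, sizeof dotless_i - 1, &st);
  mbcdlen = wcrtomb (buf, towupper (wc), &st);
  printf ("in=%zu bytes, out=%zu bytes -> offsets %s\n", mbclen, mbcdlen,
          mbclen == mbcdlen ? "not needed" : "needed");
  return 0;
}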
*/ + pstr->cur_state = prev_st; + break; + } + } + pstr->valid_len = byte_idx; + pstr->valid_raw_len = src_idx; + return REG_NOERROR; +} + +/* Skip characters until the index becomes greater than NEW_RAW_IDX. + Return the index. */ + +static int +internal_function +re_string_skip_chars (re_string_t *pstr, int new_raw_idx, wint_t *last_wc) +{ + mbstate_t prev_st; + int rawbuf_idx; + size_t mbclen; + wint_t wc = WEOF; + + /* Skip the characters which are not necessary to check. */ + for (rawbuf_idx = pstr->raw_mbs_idx + pstr->valid_raw_len; + rawbuf_idx < new_raw_idx;) + { + wchar_t wc2; + int remain_len = pstr->len - rawbuf_idx; + prev_st = pstr->cur_state; + mbclen = __mbrtowc (&wc2, (const char *) pstr->raw_mbs + rawbuf_idx, + remain_len, &pstr->cur_state); + if (BE (mbclen == (size_t) -2 || mbclen == (size_t) -1 || mbclen == 0, 0)) + { + /* We treat these cases as a single byte character. */ + if (mbclen == 0 || remain_len == 0) + wc = L'\0'; + else + wc = *(unsigned char *) (pstr->raw_mbs + rawbuf_idx); + mbclen = 1; + pstr->cur_state = prev_st; + } + else + wc = (wint_t) wc2; + /* Then proceed the next character. */ + rawbuf_idx += mbclen; + } + *last_wc = (wint_t) wc; + return rawbuf_idx; +} +#endif /* RE_ENABLE_I18N */ + +/* Build the buffer PSTR->MBS, and apply the translation if we need. + This function is used in case of REG_ICASE. */ + +static void +internal_function +build_upper_buffer (re_string_t *pstr) +{ + int char_idx, end_idx; + end_idx = (pstr->bufs_len > pstr->len) ? pstr->len : pstr->bufs_len; + + for (char_idx = pstr->valid_len; char_idx < end_idx; ++char_idx) + { + int ch = pstr->raw_mbs[pstr->raw_mbs_idx + char_idx]; + if (BE (pstr->trans != NULL, 0)) + ch = pstr->trans[ch]; + if (islower (ch)) + pstr->mbs[char_idx] = toupper (ch); + else + pstr->mbs[char_idx] = ch; + } + pstr->valid_len = char_idx; + pstr->valid_raw_len = char_idx; +} + +/* Apply TRANS to the buffer in PSTR. */ + +static void +internal_function +re_string_translate_buffer (re_string_t *pstr) +{ + int buf_idx, end_idx; + end_idx = (pstr->bufs_len > pstr->len) ? pstr->len : pstr->bufs_len; + + for (buf_idx = pstr->valid_len; buf_idx < end_idx; ++buf_idx) + { + int ch = pstr->raw_mbs[pstr->raw_mbs_idx + buf_idx]; + pstr->mbs[buf_idx] = pstr->trans[ch]; + } + + pstr->valid_len = buf_idx; + pstr->valid_raw_len = buf_idx; +} + +/* This function re-construct the buffers. + Concretely, convert to wide character in case of pstr->mb_cur_max > 1, + convert to upper case in case of REG_ICASE, apply translation. */ + +static reg_errcode_t +internal_function +re_string_reconstruct (re_string_t *pstr, int idx, int eflags) +{ + int offset = idx - pstr->raw_mbs_idx; + if (BE (offset < 0, 0)) + { + /* Reset buffer. */ +#ifdef RE_ENABLE_I18N + if (pstr->mb_cur_max > 1) + memset (&pstr->cur_state, '\0', sizeof (mbstate_t)); +#endif /* RE_ENABLE_I18N */ + pstr->len = pstr->raw_len; + pstr->stop = pstr->raw_stop; + pstr->valid_len = 0; + pstr->raw_mbs_idx = 0; + pstr->valid_raw_len = 0; + pstr->offsets_needed = 0; + pstr->tip_context = ((eflags & REG_NOTBOL) ? CONTEXT_BEGBUF + : CONTEXT_NEWLINE | CONTEXT_BEGBUF); + if (!pstr->mbs_allocated) + pstr->mbs = (unsigned char *) pstr->raw_mbs; + offset = idx; + } + + if (BE (offset != 0, 1)) + { + /* Should the already checked characters be kept? */ + if (BE (offset < pstr->valid_raw_len, 1)) + { + /* Yes, move them to the front of the buffer. 
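re_string_translate_buffer above is a straight byte map: TRANS is a 256-entry table indexed by the raw byte. A minimal sketch of such a table and its application (apply_trans and init_casefold_trans are illustrative names; real tables are supplied by the caller through RE_TRANSLATE_TYPE):

#include <ctype.h>

static void
init_casefold_trans (unsigned char trans[256])
{
  int ch;
  for (ch = 0; ch < 256; ch++)
    trans[ch] = (unsigned char) tolower (ch);
}

static void
apply_trans (unsigned char *dst, const unsigned char *src, int len,
             const unsigned char trans[256])
{
  int i;
  for (i = 0; i < len; i++)
    dst[i] = trans[src[i]];     /* the same loop shape as above */
}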
*/ +#ifdef RE_ENABLE_I18N + if (BE (pstr->offsets_needed, 0)) + { + int low = 0, high = pstr->valid_len, mid; + do + { + mid = (high + low) / 2; + if (pstr->offsets[mid] > offset) + high = mid; + else if (pstr->offsets[mid] < offset) + low = mid + 1; + else + break; + } + while (low < high); + if (pstr->offsets[mid] < offset) + ++mid; + pstr->tip_context = re_string_context_at (pstr, mid - 1, + eflags); + /* This can be quite complicated, so handle specially + only the common and easy case where the character with + different length representation of lower and upper + case is present at or after offset. */ + if (pstr->valid_len > offset + && mid == offset && pstr->offsets[mid] == offset) + { + memmove (pstr->wcs, pstr->wcs + offset, + (pstr->valid_len - offset) * sizeof (wint_t)); + memmove (pstr->mbs, pstr->mbs + offset, pstr->valid_len - offset); + pstr->valid_len -= offset; + pstr->valid_raw_len -= offset; + for (low = 0; low < pstr->valid_len; low++) + pstr->offsets[low] = pstr->offsets[low + offset] - offset; + } + else + { + /* Otherwise, just find out how long the partial multibyte + character at offset is and fill it with WEOF/255. */ + pstr->len = pstr->raw_len - idx + offset; + pstr->stop = pstr->raw_stop - idx + offset; + pstr->offsets_needed = 0; + while (mid > 0 && pstr->offsets[mid - 1] == offset) + --mid; + while (mid < pstr->valid_len) + if (pstr->wcs[mid] != WEOF) + break; + else + ++mid; + if (mid == pstr->valid_len) + pstr->valid_len = 0; + else + { + pstr->valid_len = pstr->offsets[mid] - offset; + if (pstr->valid_len) + { + for (low = 0; low < pstr->valid_len; ++low) + pstr->wcs[low] = WEOF; + memset (pstr->mbs, 255, pstr->valid_len); + } + } + pstr->valid_raw_len = pstr->valid_len; + } + } + else +#endif + { + pstr->tip_context = re_string_context_at (pstr, offset - 1, + eflags); +#ifdef RE_ENABLE_I18N + if (pstr->mb_cur_max > 1) + memmove (pstr->wcs, pstr->wcs + offset, + (pstr->valid_len - offset) * sizeof (wint_t)); +#endif /* RE_ENABLE_I18N */ + if (BE (pstr->mbs_allocated, 0)) + memmove (pstr->mbs, pstr->mbs + offset, + pstr->valid_len - offset); + pstr->valid_len -= offset; + pstr->valid_raw_len -= offset; +#if DEBUG + assert (pstr->valid_len > 0); +#endif + } + } + else + { +#ifdef RE_ENABLE_I18N + /* No, skip all characters until IDX. */ + int prev_valid_len = pstr->valid_len; + + if (BE (pstr->offsets_needed, 0)) + { + pstr->len = pstr->raw_len - idx + offset; + pstr->stop = pstr->raw_stop - idx + offset; + pstr->offsets_needed = 0; + } +#endif + pstr->valid_len = 0; +#ifdef RE_ENABLE_I18N + if (pstr->mb_cur_max > 1) + { + int wcs_idx; + wint_t wc = WEOF; + + if (pstr->is_utf8) + { + const unsigned char *raw, *p, *end; + + /* Special case UTF-8. Multi-byte chars start with any + byte other than 0x80 - 0xbf. */ + raw = pstr->raw_mbs + pstr->raw_mbs_idx; + end = raw + (offset - pstr->mb_cur_max); + if (end < pstr->raw_mbs) + end = pstr->raw_mbs; + p = raw + offset - 1; +#ifdef _LIBC + /* We know the wchar_t encoding is UCS4, so for the simple + case, ASCII characters, skip the conversion step. */ + if (isascii (*p) && BE (pstr->trans == NULL, 1)) + { + memset (&pstr->cur_state, '\0', sizeof (mbstate_t)); + /* pstr->valid_len = 0; */ + wc = (wchar_t) *p; + } + else +#endif + for (; p >= end; --p) + if ((*p & 0xc0) != 0x80) + { + mbstate_t cur_state; + wchar_t wc2; + int mlen = raw + pstr->len - p; + unsigned char buf[6]; + size_t mbclen; + + if (BE (pstr->trans != NULL, 0)) + { + int i = mlen < 6 ? 
mlen : 6; + while (--i >= 0) + buf[i] = pstr->trans[p[i]]; + } + /* XXX Don't use mbrtowc, we know which conversion + to use (UTF-8 -> UCS4). */ + memset (&cur_state, 0, sizeof (cur_state)); + mbclen = __mbrtowc (&wc2, (const char *) p, mlen, + &cur_state); + if (raw + offset - p <= mbclen + && mbclen < (size_t) -2) + { + memset (&pstr->cur_state, '\0', + sizeof (mbstate_t)); + pstr->valid_len = mbclen - (raw + offset - p); + wc = wc2; + } + break; + } + } + + if (wc == WEOF) + pstr->valid_len = re_string_skip_chars (pstr, idx, &wc) - idx; + if (wc == WEOF) + pstr->tip_context + = re_string_context_at (pstr, prev_valid_len - 1, eflags); + else + pstr->tip_context = ((BE (pstr->word_ops_used != 0, 0) + && IS_WIDE_WORD_CHAR (wc)) + ? CONTEXT_WORD + : ((IS_WIDE_NEWLINE (wc) + && pstr->newline_anchor) + ? CONTEXT_NEWLINE : 0)); + if (BE (pstr->valid_len, 0)) + { + for (wcs_idx = 0; wcs_idx < pstr->valid_len; ++wcs_idx) + pstr->wcs[wcs_idx] = WEOF; + if (pstr->mbs_allocated) + memset (pstr->mbs, 255, pstr->valid_len); + } + pstr->valid_raw_len = pstr->valid_len; + } + else +#endif /* RE_ENABLE_I18N */ + { + int c = pstr->raw_mbs[pstr->raw_mbs_idx + offset - 1]; + pstr->valid_raw_len = 0; + if (pstr->trans) + c = pstr->trans[c]; + pstr->tip_context = (bitset_contain (pstr->word_char, c) + ? CONTEXT_WORD + : ((IS_NEWLINE (c) && pstr->newline_anchor) + ? CONTEXT_NEWLINE : 0)); + } + } + if (!BE (pstr->mbs_allocated, 0)) + pstr->mbs += offset; + } + pstr->raw_mbs_idx = idx; + pstr->len -= offset; + pstr->stop -= offset; + + /* Then build the buffers. */ +#ifdef RE_ENABLE_I18N + if (pstr->mb_cur_max > 1) + { + if (pstr->icase) + { + reg_errcode_t ret = build_wcs_upper_buffer (pstr); + if (BE (ret != REG_NOERROR, 0)) + return ret; + } + else + build_wcs_buffer (pstr); + } + else +#endif /* RE_ENABLE_I18N */ + if (BE (pstr->mbs_allocated, 0)) + { + if (pstr->icase) + build_upper_buffer (pstr); + else if (pstr->trans != NULL) + re_string_translate_buffer (pstr); + } + else + pstr->valid_len = pstr->len; + + pstr->cur_idx = 0; + return REG_NOERROR; +} + +static unsigned char +internal_function __attribute ((pure)) +re_string_peek_byte_case (const re_string_t *pstr, int idx) +{ + int ch, off; + + /* Handle the common (easiest) cases first. */ + if (BE (!pstr->mbs_allocated, 1)) + return re_string_peek_byte (pstr, idx); + +#ifdef RE_ENABLE_I18N + if (pstr->mb_cur_max > 1 + && ! re_string_is_single_byte_char (pstr, pstr->cur_idx + idx)) + return re_string_peek_byte (pstr, idx); +#endif + + off = pstr->cur_idx + idx; +#ifdef RE_ENABLE_I18N + if (pstr->offsets_needed) + off = pstr->offsets[off]; +#endif + + ch = pstr->raw_mbs[pstr->raw_mbs_idx + off]; + +#ifdef RE_ENABLE_I18N + /* Ensure that e.g. for tr_TR.UTF-8 BACKSLASH DOTLESS SMALL LETTER I + this function returns CAPITAL LETTER I instead of first byte of + DOTLESS SMALL LETTER I. The latter would confuse the parser, + since peek_byte_case doesn't advance cur_idx in any way. */ + if (pstr->offsets_needed && !isascii (ch)) + return re_string_peek_byte (pstr, idx); +#endif + + return ch; +} + +static unsigned char +internal_function __attribute ((pure)) +re_string_fetch_byte_case (re_string_t *pstr) +{ + if (BE (!pstr->mbs_allocated, 1)) + return re_string_fetch_byte (pstr); + +#ifdef RE_ENABLE_I18N + if (pstr->offsets_needed) + { + int off, ch; + + /* For tr_TR.UTF-8 [[:islower:]] there is + [[: CAPITAL LETTER I WITH DOT lower:]] in mbs. Skip + in that case the whole multi-byte character and return + the original letter. 
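The backward scan in re_string_reconstruct above relies on UTF-8 being self-synchronising: continuation bytes are exactly those matching 10xxxxxx. A hedged standalone form of that scan (utf8_char_start is an illustrative name):

/* Return a pointer to the lead byte of the UTF-8 character covering *P,
   never scanning below BUF.  */
static const unsigned char *
utf8_char_start (const unsigned char *buf, const unsigned char *p)
{
  while (p > buf && (*p & 0xc0) == 0x80)
    --p;                        /* skip continuation bytes */
  return p;                     /* lead byte, or BUF on malformed input */
}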
On the other hand, when the input contains
+	 [[: DOTLESS SMALL LETTER I, return [[:I, since doing
+	 anything else would complicate things too much.  */
+
+      if (!re_string_first_byte (pstr, pstr->cur_idx))
+	return re_string_fetch_byte (pstr);
+
+      off = pstr->offsets[pstr->cur_idx];
+      ch = pstr->raw_mbs[pstr->raw_mbs_idx + off];
+
+      if (! isascii (ch))
+	return re_string_fetch_byte (pstr);
+
+      re_string_skip_bytes (pstr,
+			    re_string_char_size_at (pstr, pstr->cur_idx));
+      return ch;
+    }
+#endif
+
+  return pstr->raw_mbs[pstr->raw_mbs_idx + pstr->cur_idx++];
+}
+
+static void
+internal_function
+re_string_destruct (re_string_t *pstr)
+{
+#ifdef RE_ENABLE_I18N
+  re_free (pstr->wcs);
+  re_free (pstr->offsets);
+#endif /* RE_ENABLE_I18N */
+  if (pstr->mbs_allocated)
+    re_free (pstr->mbs);
+}
+
+/* Return the context at IDX in INPUT.  */
+
+static unsigned int
+internal_function
+re_string_context_at (const re_string_t *input, int idx, int eflags)
+{
+  int c;
+  if (BE (idx < 0, 0))
+    /* In this case, we use the value stored in input->tip_context,
+       since we can't know the character in input->mbs[-1] here.  */
+    return input->tip_context;
+  if (BE (idx == input->len, 0))
+    return ((eflags & REG_NOTEOL) ? CONTEXT_ENDBUF
+	    : CONTEXT_NEWLINE | CONTEXT_ENDBUF);
+#ifdef RE_ENABLE_I18N
+  if (input->mb_cur_max > 1)
+    {
+      wint_t wc;
+      int wc_idx = idx;
+      while (input->wcs[wc_idx] == WEOF)
+	{
+#ifdef DEBUG
+	  /* This must never happen.  */
+	  assert (wc_idx >= 0);
+#endif
+	  --wc_idx;
+	  if (wc_idx < 0)
+	    return input->tip_context;
+	}
+      wc = input->wcs[wc_idx];
+      if (BE (input->word_ops_used != 0, 0) && IS_WIDE_WORD_CHAR (wc))
+	return CONTEXT_WORD;
+      return (IS_WIDE_NEWLINE (wc) && input->newline_anchor
+	      ? CONTEXT_NEWLINE : 0);
+    }
+  else
+#endif
+    {
+      c = re_string_byte_at (input, idx);
+      if (bitset_contain (input->word_char, c))
+	return CONTEXT_WORD;
+      return IS_NEWLINE (c) && input->newline_anchor ? CONTEXT_NEWLINE : 0;
+    }
+}
+
+/* Functions for set operations.  */
+
+static reg_errcode_t
+internal_function
+re_node_set_alloc (re_node_set *set, int size)
+{
+  /*
+   * ADR: valgrind says SIZE can be 0, in which case the zero-size
+   * block would never be freed.  Harumph.  Returning early for
+   * that case seems to work, though.
+ */ + if (size == 0) + { + memset(set, 0, sizeof(*set)); + return REG_NOERROR; + } + set->alloc = size; + set->nelem = 0; + set->elems = re_malloc (int, size); + if (BE (set->elems == NULL, 0)) + return REG_ESPACE; + return REG_NOERROR; +} + +static reg_errcode_t +internal_function +re_node_set_init_1 (re_node_set *set, int elem) +{ + set->alloc = 1; + set->nelem = 1; + set->elems = re_malloc (int, 1); + if (BE (set->elems == NULL, 0)) + { + set->alloc = set->nelem = 0; + return REG_ESPACE; + } + set->elems[0] = elem; + return REG_NOERROR; +} + +static reg_errcode_t +internal_function +re_node_set_init_2 (re_node_set *set, int elem1, int elem2) +{ + set->alloc = 2; + set->elems = re_malloc (int, 2); + if (BE (set->elems == NULL, 0)) + return REG_ESPACE; + if (elem1 == elem2) + { + set->nelem = 1; + set->elems[0] = elem1; + } + else + { + set->nelem = 2; + if (elem1 < elem2) + { + set->elems[0] = elem1; + set->elems[1] = elem2; + } + else + { + set->elems[0] = elem2; + set->elems[1] = elem1; + } + } + return REG_NOERROR; +} + +static reg_errcode_t +internal_function +re_node_set_init_copy (re_node_set *dest, const re_node_set *src) +{ + dest->nelem = src->nelem; + if (src->nelem > 0) + { + dest->alloc = dest->nelem; + dest->elems = re_malloc (int, dest->alloc); + if (BE (dest->elems == NULL, 0)) + { + dest->alloc = dest->nelem = 0; + return REG_ESPACE; + } + memcpy (dest->elems, src->elems, src->nelem * sizeof (int)); + } + else + re_node_set_init_empty (dest); + return REG_NOERROR; +} + +/* Calculate the intersection of the sets SRC1 and SRC2. And merge it to + DEST. Return value indicate the error code or REG_NOERROR if succeeded. + Note: We assume dest->elems is NULL, when dest->alloc is 0. */ + +static reg_errcode_t +internal_function +re_node_set_add_intersect (re_node_set *dest, const re_node_set *src1, + const re_node_set *src2) +{ + int i1, i2, is, id, delta, sbase; + if (src1->nelem == 0 || src2->nelem == 0) + return REG_NOERROR; + + /* We need dest->nelem + 2 * elems_in_intersection; this is a + conservative estimate. */ + if (src1->nelem + src2->nelem + dest->nelem > dest->alloc) + { + int new_alloc = src1->nelem + src2->nelem + dest->alloc; + int *new_elems = re_realloc (dest->elems, int, new_alloc); + if (BE (new_elems == NULL, 0)) + return REG_ESPACE; + dest->elems = new_elems; + dest->alloc = new_alloc; + } + + /* Find the items in the intersection of SRC1 and SRC2, and copy + into the top of DEST those that are not already in DEST itself. */ + sbase = dest->nelem + src1->nelem + src2->nelem; + i1 = src1->nelem - 1; + i2 = src2->nelem - 1; + id = dest->nelem - 1; + for (;;) + { + if (src1->elems[i1] == src2->elems[i2]) + { + /* Try to find the item in DEST. Maybe we could binary search? */ + while (id >= 0 && dest->elems[id] > src1->elems[i1]) + --id; + + if (id < 0 || dest->elems[id] != src1->elems[i1]) + dest->elems[--sbase] = src1->elems[i1]; + + if (--i1 < 0 || --i2 < 0) + break; + } + + /* Lower the highest of the two items. */ + else if (src1->elems[i1] < src2->elems[i2]) + { + if (--i2 < 0) + break; + } + else + { + if (--i1 < 0) + break; + } + } + + id = dest->nelem - 1; + is = dest->nelem + src1->nelem + src2->nelem - 1; + delta = is - sbase + 1; + + /* Now copy. When DELTA becomes zero, the remaining + DEST elements are already in place; this is more or + less the same loop that is in re_node_set_merge. */ + dest->nelem += delta; + if (delta > 0 && id >= 0) + for (;;) + { + if (dest->elems[is] > dest->elems[id]) + { + /* Copy from the top. 
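Stripped of the in-place staging above (candidates are first copied to the top of DEST and then merged downward), re_node_set_add_intersect computes a plain intersection of two ascending arrays. A self-contained sketch (sorted_intersect is an illustrative name; OUT must have room for min(NA, NB) elements):

static int
sorted_intersect (const int *a, int na, const int *b, int nb, int *out)
{
  int i = 0, j = 0, n = 0;
  while (i < na && j < nb)
    {
      if (a[i] < b[j])
        ++i;
      else if (a[i] > b[j])
        ++j;
      else
        {
          out[n++] = a[i];      /* common element */
          ++i, ++j;
        }
    }
  return n;                     /* number of elements written */
}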
*/ + dest->elems[id + delta--] = dest->elems[is--]; + if (delta == 0) + break; + } + else + { + /* Slide from the bottom. */ + dest->elems[id + delta] = dest->elems[id]; + if (--id < 0) + break; + } + } + + /* Copy remaining SRC elements. */ + memcpy (dest->elems, dest->elems + sbase, delta * sizeof (int)); + + return REG_NOERROR; +} + +/* Calculate the union set of the sets SRC1 and SRC2. And store it to + DEST. Return value indicate the error code or REG_NOERROR if succeeded. */ + +static reg_errcode_t +internal_function +re_node_set_init_union (re_node_set *dest, const re_node_set *src1, + const re_node_set *src2) +{ + int i1, i2, id; + if (src1 != NULL && src1->nelem > 0 && src2 != NULL && src2->nelem > 0) + { + dest->alloc = src1->nelem + src2->nelem; + dest->elems = re_malloc (int, dest->alloc); + if (BE (dest->elems == NULL, 0)) + return REG_ESPACE; + } + else + { + if (src1 != NULL && src1->nelem > 0) + return re_node_set_init_copy (dest, src1); + else if (src2 != NULL && src2->nelem > 0) + return re_node_set_init_copy (dest, src2); + else + re_node_set_init_empty (dest); + return REG_NOERROR; + } + for (i1 = i2 = id = 0 ; i1 < src1->nelem && i2 < src2->nelem ;) + { + if (src1->elems[i1] > src2->elems[i2]) + { + dest->elems[id++] = src2->elems[i2++]; + continue; + } + if (src1->elems[i1] == src2->elems[i2]) + ++i2; + dest->elems[id++] = src1->elems[i1++]; + } + if (i1 < src1->nelem) + { + memcpy (dest->elems + id, src1->elems + i1, + (src1->nelem - i1) * sizeof (int)); + id += src1->nelem - i1; + } + else if (i2 < src2->nelem) + { + memcpy (dest->elems + id, src2->elems + i2, + (src2->nelem - i2) * sizeof (int)); + id += src2->nelem - i2; + } + dest->nelem = id; + return REG_NOERROR; +} + +/* Calculate the union set of the sets DEST and SRC. And store it to + DEST. Return value indicate the error code or REG_NOERROR if succeeded. */ + +static reg_errcode_t +internal_function +re_node_set_merge (re_node_set *dest, const re_node_set *src) +{ + int is, id, sbase, delta; + if (src == NULL || src->nelem == 0) + return REG_NOERROR; + if (dest->alloc < 2 * src->nelem + dest->nelem) + { + int new_alloc = 2 * (src->nelem + dest->alloc); + int *new_buffer = re_realloc (dest->elems, int, new_alloc); + if (BE (new_buffer == NULL, 0)) + return REG_ESPACE; + dest->elems = new_buffer; + dest->alloc = new_alloc; + } + + if (BE (dest->nelem == 0, 0)) + { + dest->nelem = src->nelem; + memcpy (dest->elems, src->elems, src->nelem * sizeof (int)); + return REG_NOERROR; + } + + /* Copy into the top of DEST the items of SRC that are not + found in DEST. Maybe we could binary search in DEST? */ + for (sbase = dest->nelem + 2 * src->nelem, + is = src->nelem - 1, id = dest->nelem - 1; is >= 0 && id >= 0; ) + { + if (dest->elems[id] == src->elems[is]) + is--, id--; + else if (dest->elems[id] < src->elems[is]) + dest->elems[--sbase] = src->elems[is--]; + else /* if (dest->elems[id] > src->elems[is]) */ + --id; + } + + if (is >= 0) + { + /* If DEST is exhausted, the remaining items of SRC must be unique. */ + sbase -= is + 1; + memcpy (dest->elems + sbase, src->elems, (is + 1) * sizeof (int)); + } + + id = dest->nelem - 1; + is = dest->nelem + 2 * src->nelem - 1; + delta = is - sbase + 1; + if (delta == 0) + return REG_NOERROR; + + /* Now copy. When DELTA becomes zero, the remaining + DEST elements are already in place. */ + dest->nelem += delta; + for (;;) + { + if (dest->elems[is] > dest->elems[id]) + { + /* Copy from the top. 
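Likewise, the core of re_node_set_init_union is an ordinary two-pointer merge of ascending arrays with duplicates collapsed. A sketch under the same assumptions (sorted_union is illustrative; OUT must hold NA + NB elements):

static int
sorted_union (const int *a, int na, const int *b, int nb, int *out)
{
  int i = 0, j = 0, n = 0;
  while (i < na && j < nb)
    {
      if (a[i] < b[j])
        out[n++] = a[i++];
      else if (a[i] > b[j])
        out[n++] = b[j++];
      else
        {
          out[n++] = a[i++];    /* equal: keep a single copy */
          ++j;
        }
    }
  while (i < na)
    out[n++] = a[i++];          /* drain whichever side remains */
  while (j < nb)
    out[n++] = b[j++];
  return n;
}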
*/ + dest->elems[id + delta--] = dest->elems[is--]; + if (delta == 0) + break; + } + else + { + /* Slide from the bottom. */ + dest->elems[id + delta] = dest->elems[id]; + if (--id < 0) + { + /* Copy remaining SRC elements. */ + memcpy (dest->elems, dest->elems + sbase, + delta * sizeof (int)); + break; + } + } + } + + return REG_NOERROR; +} + +/* Insert the new element ELEM to the re_node_set* SET. + SET should not already have ELEM. + return -1 if an error has occurred, return 1 otherwise. */ + +static int +internal_function +re_node_set_insert (re_node_set *set, int elem) +{ + int idx; + /* In case the set is empty. */ + if (set->alloc == 0) + { + if (BE (re_node_set_init_1 (set, elem) == REG_NOERROR, 1)) + return 1; + else + return -1; + } + + if (BE (set->nelem, 0) == 0) + { + /* We already guaranteed above that set->alloc != 0. */ + set->elems[0] = elem; + ++set->nelem; + return 1; + } + + /* Realloc if we need. */ + if (set->alloc == set->nelem) + { + int *new_elems; + set->alloc = set->alloc * 2; + new_elems = re_realloc (set->elems, int, set->alloc); + if (BE (new_elems == NULL, 0)) + return -1; + set->elems = new_elems; + } + + /* Move the elements which follows the new element. Test the + first element separately to skip a check in the inner loop. */ + if (elem < set->elems[0]) + { + idx = 0; + for (idx = set->nelem; idx > 0; idx--) + set->elems[idx] = set->elems[idx - 1]; + } + else + { + for (idx = set->nelem; set->elems[idx - 1] > elem; idx--) + set->elems[idx] = set->elems[idx - 1]; + } + + /* Insert the new element. */ + set->elems[idx] = elem; + ++set->nelem; + return 1; +} + +/* Insert the new element ELEM to the re_node_set* SET. + SET should not already have any element greater than or equal to ELEM. + Return -1 if an error has occurred, return 1 otherwise. */ + +static int +internal_function +re_node_set_insert_last (re_node_set *set, int elem) +{ + /* Realloc if we need. */ + if (set->alloc == set->nelem) + { + int *new_elems; + set->alloc = (set->alloc + 1) * 2; + new_elems = re_realloc (set->elems, int, set->alloc); + if (BE (new_elems == NULL, 0)) + return -1; + set->elems = new_elems; + } + + /* Insert the new element. */ + set->elems[set->nelem++] = elem; + return 1; +} + +/* Compare two node sets SET1 and SET2. + return 1 if SET1 and SET2 are equivalent, return 0 otherwise. */ + +static int +internal_function __attribute ((pure)) +re_node_set_compare (const re_node_set *set1, const re_node_set *set2) +{ + int i; + if (set1 == NULL || set2 == NULL || set1->nelem != set2->nelem) + return 0; + for (i = set1->nelem ; --i >= 0 ; ) + if (set1->elems[i] != set2->elems[i]) + return 0; + return 1; +} + +/* Return (idx + 1) if SET contains the element ELEM, return 0 otherwise. */ + +static int +internal_function __attribute ((pure)) +re_node_set_contains (const re_node_set *set, int elem) +{ + unsigned int idx, right, mid; + if (set->nelem <= 0) + return 0; + + /* Binary search the element. */ + idx = 0; + right = set->nelem - 1; + while (idx < right) + { + mid = (idx + right) / 2; + if (set->elems[mid] < elem) + idx = mid + 1; + else + right = mid; + } + return set->elems[idx] == elem ? idx + 1 : 0; +} + +static void +internal_function +re_node_set_remove_at (re_node_set *set, int idx) +{ + if (idx < 0 || idx >= set->nelem) + return; + --set->nelem; + for (; idx < set->nelem; idx++) + set->elems[idx] = set->elems[idx + 1]; +} + + +/* Add the token TOKEN to dfa->nodes, and return the index of the token. + Or return -1, if an error has occurred. 
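Note the convention used just above: re_node_set_contains returns the 1-based position so that 0 can mean "absent", which is what lets the re_node_set_remove macro in regex_internal.h feed the result straight into re_node_set_remove_at. The same idiom spelled out as a sketch (set_remove_value is an illustrative name):

static void
set_remove_value (re_node_set *set, int elem)
{
  int pos = re_node_set_contains (set, elem);   /* 0, or index + 1 */
  if (pos != 0)
    re_node_set_remove_at (set, pos - 1);
}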
*/ + +static int +internal_function +re_dfa_add_node (re_dfa_t *dfa, re_token_t token) +{ + if (BE (dfa->nodes_len >= dfa->nodes_alloc, 0)) + { + size_t new_nodes_alloc = dfa->nodes_alloc * 2; + int *new_nexts, *new_indices; + re_node_set *new_edests, *new_eclosures; + re_token_t *new_nodes; + + /* Avoid overflows in realloc. */ + const size_t max_object_size = MAX (sizeof (re_token_t), + MAX (sizeof (re_node_set), + sizeof (int))); + if (BE (SIZE_MAX / max_object_size < new_nodes_alloc, 0)) + return -1; + + new_nodes = re_realloc (dfa->nodes, re_token_t, new_nodes_alloc); + if (BE (new_nodes == NULL, 0)) + return -1; + dfa->nodes = new_nodes; + new_nexts = re_realloc (dfa->nexts, int, new_nodes_alloc); + new_indices = re_realloc (dfa->org_indices, int, new_nodes_alloc); + new_edests = re_realloc (dfa->edests, re_node_set, new_nodes_alloc); + new_eclosures = re_realloc (dfa->eclosures, re_node_set, new_nodes_alloc); + if (BE (new_nexts == NULL || new_indices == NULL + || new_edests == NULL || new_eclosures == NULL, 0)) + return -1; + dfa->nexts = new_nexts; + dfa->org_indices = new_indices; + dfa->edests = new_edests; + dfa->eclosures = new_eclosures; + dfa->nodes_alloc = new_nodes_alloc; + } + dfa->nodes[dfa->nodes_len] = token; + dfa->nodes[dfa->nodes_len].constraint = 0; +#ifdef RE_ENABLE_I18N + dfa->nodes[dfa->nodes_len].accept_mb = + (token.type == OP_PERIOD && dfa->mb_cur_max > 1) || token.type == COMPLEX_BRACKET; +#endif + dfa->nexts[dfa->nodes_len] = -1; + re_node_set_init_empty (dfa->edests + dfa->nodes_len); + re_node_set_init_empty (dfa->eclosures + dfa->nodes_len); + return dfa->nodes_len++; +} + +static inline unsigned int +internal_function +calc_state_hash (const re_node_set *nodes, unsigned int context) +{ + unsigned int hash = nodes->nelem + context; + int i; + for (i = 0 ; i < nodes->nelem ; i++) + hash += nodes->elems[i]; + return hash; +} + +/* Search for the state whose node_set is equivalent to NODES. + Return the pointer to the state, if we found it in the DFA. + Otherwise create the new one and return it. In case of an error + return NULL and set the error code in ERR. + Note: - We assume NULL as the invalid state, then it is possible that + return value is NULL and ERR is REG_NOERROR. + - We never return non-NULL value in case of any errors, it is for + optimization. */ + +static re_dfastate_t * +internal_function +re_acquire_state (reg_errcode_t *err, const re_dfa_t *dfa, + const re_node_set *nodes) +{ + unsigned int hash; + re_dfastate_t *new_state; + struct re_state_table_entry *spot; + int i; + if (BE (nodes->nelem == 0, 0)) + { + *err = REG_NOERROR; + return NULL; + } + hash = calc_state_hash (nodes, 0); + spot = dfa->state_table + (hash & dfa->state_hash_mask); + + for (i = 0 ; i < spot->num ; i++) + { + re_dfastate_t *state = spot->array[i]; + if (hash != state->hash) + continue; + if (re_node_set_compare (&state->nodes, nodes)) + return state; + } + + /* There are no appropriate state in the dfa, create the new one. */ + new_state = create_ci_newstate (dfa, nodes, hash); + if (BE (new_state == NULL, 0)) + *err = REG_ESPACE; + + return new_state; +} + +/* Search for the state whose node_set is equivalent to NODES and + whose context is equivalent to CONTEXT. + Return the pointer to the state, if we found it in the DFA. + Otherwise create the new one and return it. In case of an error + return NULL and set the error code in ERR. + Note: - We assume NULL as the invalid state, then it is possible that + return value is NULL and ERR is REG_NOERROR. 
+ - We never return non-NULL value in case of any errors, it is for + optimization. */ + +static re_dfastate_t * +internal_function +re_acquire_state_context (reg_errcode_t *err, const re_dfa_t *dfa, + const re_node_set *nodes, unsigned int context) +{ + unsigned int hash; + re_dfastate_t *new_state; + struct re_state_table_entry *spot; + int i; + if (nodes->nelem == 0) + { + *err = REG_NOERROR; + return NULL; + } + hash = calc_state_hash (nodes, context); + spot = dfa->state_table + (hash & dfa->state_hash_mask); + + for (i = 0 ; i < spot->num ; i++) + { + re_dfastate_t *state = spot->array[i]; + if (state->hash == hash + && state->context == context + && re_node_set_compare (state->entrance_nodes, nodes)) + return state; + } + /* There are no appropriate state in `dfa', create the new one. */ + new_state = create_cd_newstate (dfa, nodes, context, hash); + if (BE (new_state == NULL, 0)) + *err = REG_ESPACE; + + return new_state; +} + +/* Finish initialization of the new state NEWSTATE, and using its hash value + HASH put in the appropriate bucket of DFA's state table. Return value + indicates the error code if failed. */ + +static reg_errcode_t +register_state (const re_dfa_t *dfa, re_dfastate_t *newstate, + unsigned int hash) +{ + struct re_state_table_entry *spot; + reg_errcode_t err; + int i; + + newstate->hash = hash; + err = re_node_set_alloc (&newstate->non_eps_nodes, newstate->nodes.nelem); + if (BE (err != REG_NOERROR, 0)) + return REG_ESPACE; + for (i = 0; i < newstate->nodes.nelem; i++) + { + int elem = newstate->nodes.elems[i]; + if (!IS_EPSILON_NODE (dfa->nodes[elem].type)) + if (re_node_set_insert_last (&newstate->non_eps_nodes, elem) < 0) + return REG_ESPACE; + } + + spot = dfa->state_table + (hash & dfa->state_hash_mask); + if (BE (spot->alloc <= spot->num, 0)) + { + int new_alloc = 2 * spot->num + 2; + re_dfastate_t **new_array = re_realloc (spot->array, re_dfastate_t *, + new_alloc); + if (BE (new_array == NULL, 0)) + return REG_ESPACE; + spot->array = new_array; + spot->alloc = new_alloc; + } + spot->array[spot->num++] = newstate; + return REG_NOERROR; +} + +static void +free_state (re_dfastate_t *state) +{ + re_node_set_free (&state->non_eps_nodes); + re_node_set_free (&state->inveclosure); + if (state->entrance_nodes != &state->nodes) + { + re_node_set_free (state->entrance_nodes); + re_free (state->entrance_nodes); + } + re_node_set_free (&state->nodes); + re_free (state->word_trtable); + re_free (state->trtable); + re_free (state); +} + +/* Create the new state which is independ of contexts. + Return the new state if succeeded, otherwise return NULL. */ + +static re_dfastate_t * +internal_function +create_ci_newstate (const re_dfa_t *dfa, const re_node_set *nodes, + unsigned int hash) +{ + int i; + reg_errcode_t err; + re_dfastate_t *newstate; + + newstate = (re_dfastate_t *) calloc (sizeof (re_dfastate_t), 1); + if (BE (newstate == NULL, 0)) + return NULL; + err = re_node_set_init_copy (&newstate->nodes, nodes); + if (BE (err != REG_NOERROR, 0)) + { + re_free (newstate); + return NULL; + } + + newstate->entrance_nodes = &newstate->nodes; + for (i = 0 ; i < nodes->nelem ; i++) + { + re_token_t *node = dfa->nodes + nodes->elems[i]; + re_token_type_t type = node->type; + if (type == CHARACTER && !node->constraint) + continue; +#ifdef RE_ENABLE_I18N + newstate->accept_mb |= node->accept_mb; +#endif /* RE_ENABLE_I18N */ + + /* If the state has the halt node, the state is a halt state. 
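The calling convention spelled out in the notes above bears a sketch: NULL is a legitimate return for an empty node set, so callers initialize ERR themselves and must test both results. get_state_checked is an illustrative wrapper, not part of this file:

static re_dfastate_t *
get_state_checked (reg_errcode_t *err, const re_dfa_t *dfa,
                   const re_node_set *nodes)
{
  re_dfastate_t *state;
  *err = REG_NOERROR;           /* the callee only sets ERR on some paths */
  state = re_acquire_state (err, dfa, nodes);
  if (state == NULL && *err != REG_NOERROR)
    return NULL;                /* genuine failure, e.g. REG_ESPACE */
  return state;                 /* may legitimately be NULL for empty NODES */
}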
*/ + if (type == END_OF_RE) + newstate->halt = 1; + else if (type == OP_BACK_REF) + newstate->has_backref = 1; + else if (type == ANCHOR || node->constraint) + newstate->has_constraint = 1; + } + err = register_state (dfa, newstate, hash); + if (BE (err != REG_NOERROR, 0)) + { + free_state (newstate); + newstate = NULL; + } + return newstate; +} + +/* Create the new state which is depend on the context CONTEXT. + Return the new state if succeeded, otherwise return NULL. */ + +static re_dfastate_t * +internal_function +create_cd_newstate (const re_dfa_t *dfa, const re_node_set *nodes, + unsigned int context, unsigned int hash) +{ + int i, nctx_nodes = 0; + reg_errcode_t err; + re_dfastate_t *newstate; + + newstate = (re_dfastate_t *) calloc (sizeof (re_dfastate_t), 1); + if (BE (newstate == NULL, 0)) + return NULL; + err = re_node_set_init_copy (&newstate->nodes, nodes); + if (BE (err != REG_NOERROR, 0)) + { + re_free (newstate); + return NULL; + } + + newstate->context = context; + newstate->entrance_nodes = &newstate->nodes; + + for (i = 0 ; i < nodes->nelem ; i++) + { + re_token_t *node = dfa->nodes + nodes->elems[i]; + re_token_type_t type = node->type; + unsigned int constraint = node->constraint; + + if (type == CHARACTER && !constraint) + continue; +#ifdef RE_ENABLE_I18N + newstate->accept_mb |= node->accept_mb; +#endif /* RE_ENABLE_I18N */ + + /* If the state has the halt node, the state is a halt state. */ + if (type == END_OF_RE) + newstate->halt = 1; + else if (type == OP_BACK_REF) + newstate->has_backref = 1; + + if (constraint) + { + if (newstate->entrance_nodes == &newstate->nodes) + { + newstate->entrance_nodes = re_malloc (re_node_set, 1); + if (BE (newstate->entrance_nodes == NULL, 0)) + { + free_state (newstate); + return NULL; + } + if (re_node_set_init_copy (newstate->entrance_nodes, nodes) + != REG_NOERROR) + return NULL; + nctx_nodes = 0; + newstate->has_constraint = 1; + } + + if (NOT_SATISFY_PREV_CONSTRAINT (constraint,context)) + { + re_node_set_remove_at (&newstate->nodes, i - nctx_nodes); + ++nctx_nodes; + } + } + } + err = register_state (dfa, newstate, hash); + if (BE (err != REG_NOERROR, 0)) + { + free_state (newstate); + newstate = NULL; + } + return newstate; +} diff --git a/compat/regex/regex_internal.h b/compat/regex/regex_internal.h new file mode 100644 index 0000000000..4184d7f5a6 --- /dev/null +++ b/compat/regex/regex_internal.h @@ -0,0 +1,810 @@ +/* Extended regular expression matching and search library. + Copyright (C) 2002-2005, 2007, 2008, 2010 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Isamu Hasegawa <isamu@yamato.ibm.com>. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. 
*/ + +#ifndef _REGEX_INTERNAL_H +#define _REGEX_INTERNAL_H 1 + +#include <assert.h> +#include <ctype.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#if defined HAVE_LANGINFO_H || defined HAVE_LANGINFO_CODESET || defined _LIBC +# include <langinfo.h> +#endif +#if defined HAVE_LOCALE_H || defined _LIBC +# include <locale.h> +#endif +#if defined HAVE_WCHAR_H || defined _LIBC +# include <wchar.h> +#endif /* HAVE_WCHAR_H || _LIBC */ +#if defined HAVE_WCTYPE_H || defined _LIBC +# include <wctype.h> +#endif /* HAVE_WCTYPE_H || _LIBC */ +#if defined HAVE_STDBOOL_H || defined _LIBC +# include <stdbool.h> +#endif /* HAVE_STDBOOL_H || _LIBC */ +#if !defined(ZOS_USS) +#if defined HAVE_STDINT_H || defined _LIBC +# include <stdint.h> +#endif /* HAVE_STDINT_H || _LIBC */ +#endif /* !ZOS_USS */ +#if defined _LIBC +# include <bits/libc-lock.h> +#else +# define __libc_lock_define(CLASS,NAME) +# define __libc_lock_init(NAME) do { } while (0) +# define __libc_lock_lock(NAME) do { } while (0) +# define __libc_lock_unlock(NAME) do { } while (0) +#endif + +#ifndef GAWK +/* In case that the system doesn't have isblank(). */ +#if !defined _LIBC && !defined HAVE_ISBLANK && !defined isblank +# define isblank(ch) ((ch) == ' ' || (ch) == '\t') +#endif +#else /* GAWK */ +/* + * This is a freaking mess. On glibc systems you have to define + * a magic constant to get isblank() out of <ctype.h>, since it's + * a C99 function. To heck with all that and borrow a page from + * dfa.c's book. + */ + +static int +is_blank (int c) +{ + return (c == ' ' || c == '\t'); +} +#endif /* GAWK */ + +#ifdef _LIBC +# ifndef _RE_DEFINE_LOCALE_FUNCTIONS +# define _RE_DEFINE_LOCALE_FUNCTIONS 1 +# include <locale/localeinfo.h> +# include <locale/elem-hash.h> +# include <locale/coll-lookup.h> +# endif +#endif + +/* This is for other GNU distributions with internationalized messages. */ +#if (HAVE_LIBINTL_H && ENABLE_NLS) || defined _LIBC +# include <libintl.h> +# ifdef _LIBC +# undef gettext +# define gettext(msgid) \ + INTUSE(__dcgettext) (_libc_intl_domainname, msgid, LC_MESSAGES) +# endif +#else +# define gettext(msgid) (msgid) +#endif + +#ifndef gettext_noop +/* This define is so xgettext can find the internationalizable + strings. */ +# define gettext_noop(String) String +#endif + +/* For loser systems without the definition. */ +#ifndef SIZE_MAX +# define SIZE_MAX ((size_t) -1) +#endif + +#ifndef NO_MBSUPPORT +#include "mbsupport.h" /* gawk */ +#endif +#ifndef MB_CUR_MAX +#define MB_CUR_MAX 1 +#endif + +#if (defined MBS_SUPPORT) || _LIBC +# define RE_ENABLE_I18N +#endif + +#if __GNUC__ >= 3 +# define BE(expr, val) __builtin_expect (expr, val) +#else +# define BE(expr, val) (expr) +# ifdef inline +# undef inline +# endif +# define inline +#endif + +/* Number of single byte character. */ +#define SBC_MAX 256 + +#define COLL_ELEM_LEN_MAX 8 + +/* The character which represents newline. */ +#define NEWLINE_CHAR '\n' +#define WIDE_NEWLINE_CHAR L'\n' + +/* Rename to standard API for using out of glibc. 
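The BE macro defined above is the usual __builtin_expect branch hint: the second argument is the anticipated truth value, which is why error paths throughout these files are written BE (..., 0). A standalone sketch of the pattern (xmalloc_or_die is an illustrative name; the macro is repeated here only to keep the sketch self-contained):

#include <stdlib.h>

#if __GNUC__ >= 3
# define BE(expr, val) __builtin_expect (expr, val)
#else
# define BE(expr, val) (expr)
#endif

static void *
xmalloc_or_die (size_t n)
{
  void *p = malloc (n);
  if (BE (p == NULL, 0))        /* tell the compiler this is unlikely */
    abort ();
  return p;
}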
*/ +#ifndef _LIBC +# ifdef __wctype +# undef __wctype +# endif +# define __wctype wctype +# ifdef __iswctype +# undef __iswctype +# endif +# define __iswctype iswctype +# define __btowc btowc +# define __mbrtowc mbrtowc +#undef __mempcpy /* GAWK */ +# define __mempcpy mempcpy +# define __wcrtomb wcrtomb +# define __regfree regfree +# define attribute_hidden +#endif /* not _LIBC */ + +#ifdef __GNUC__ +# define __attribute(arg) __attribute__ (arg) +#else +# define __attribute(arg) +#endif + +extern const char __re_error_msgid[] attribute_hidden; +extern const size_t __re_error_msgid_idx[] attribute_hidden; + +/* An integer used to represent a set of bits. It must be unsigned, + and must be at least as wide as unsigned int. */ +typedef unsigned long int bitset_word_t; +/* All bits set in a bitset_word_t. */ +#define BITSET_WORD_MAX ULONG_MAX +/* Number of bits in a bitset_word_t. */ +#define BITSET_WORD_BITS (sizeof (bitset_word_t) * CHAR_BIT) +/* Number of bitset_word_t in a bit_set. */ +#define BITSET_WORDS (SBC_MAX / BITSET_WORD_BITS) +typedef bitset_word_t bitset_t[BITSET_WORDS]; +typedef bitset_word_t *re_bitset_ptr_t; +typedef const bitset_word_t *re_const_bitset_ptr_t; + +#define bitset_set(set,i) \ + (set[i / BITSET_WORD_BITS] |= (bitset_word_t) 1 << i % BITSET_WORD_BITS) +#define bitset_clear(set,i) \ + (set[i / BITSET_WORD_BITS] &= ~((bitset_word_t) 1 << i % BITSET_WORD_BITS)) +#define bitset_contain(set,i) \ + (set[i / BITSET_WORD_BITS] & ((bitset_word_t) 1 << i % BITSET_WORD_BITS)) +#define bitset_empty(set) memset (set, '\0', sizeof (bitset_t)) +#define bitset_set_all(set) memset (set, '\xff', sizeof (bitset_t)) +#define bitset_copy(dest,src) memcpy (dest, src, sizeof (bitset_t)) + +#define PREV_WORD_CONSTRAINT 0x0001 +#define PREV_NOTWORD_CONSTRAINT 0x0002 +#define NEXT_WORD_CONSTRAINT 0x0004 +#define NEXT_NOTWORD_CONSTRAINT 0x0008 +#define PREV_NEWLINE_CONSTRAINT 0x0010 +#define NEXT_NEWLINE_CONSTRAINT 0x0020 +#define PREV_BEGBUF_CONSTRAINT 0x0040 +#define NEXT_ENDBUF_CONSTRAINT 0x0080 +#define WORD_DELIM_CONSTRAINT 0x0100 +#define NOT_WORD_DELIM_CONSTRAINT 0x0200 + +typedef enum +{ + INSIDE_WORD = PREV_WORD_CONSTRAINT | NEXT_WORD_CONSTRAINT, + WORD_FIRST = PREV_NOTWORD_CONSTRAINT | NEXT_WORD_CONSTRAINT, + WORD_LAST = PREV_WORD_CONSTRAINT | NEXT_NOTWORD_CONSTRAINT, + INSIDE_NOTWORD = PREV_NOTWORD_CONSTRAINT | NEXT_NOTWORD_CONSTRAINT, + LINE_FIRST = PREV_NEWLINE_CONSTRAINT, + LINE_LAST = NEXT_NEWLINE_CONSTRAINT, + BUF_FIRST = PREV_BEGBUF_CONSTRAINT, + BUF_LAST = NEXT_ENDBUF_CONSTRAINT, + WORD_DELIM = WORD_DELIM_CONSTRAINT, + NOT_WORD_DELIM = NOT_WORD_DELIM_CONSTRAINT +} re_context_type; + +typedef struct +{ + int alloc; + int nelem; + int *elems; +} re_node_set; + +typedef enum +{ + NON_TYPE = 0, + + /* Node type, These are used by token, node, tree. */ + CHARACTER = 1, + END_OF_RE = 2, + SIMPLE_BRACKET = 3, + OP_BACK_REF = 4, + OP_PERIOD = 5, +#ifdef RE_ENABLE_I18N + COMPLEX_BRACKET = 6, + OP_UTF8_PERIOD = 7, +#endif /* RE_ENABLE_I18N */ + + /* We define EPSILON_BIT as a macro so that OP_OPEN_SUBEXP is used + when the debugger shows values of this enum type. */ +#define EPSILON_BIT 8 + OP_OPEN_SUBEXP = EPSILON_BIT | 0, + OP_CLOSE_SUBEXP = EPSILON_BIT | 1, + OP_ALT = EPSILON_BIT | 2, + OP_DUP_ASTERISK = EPSILON_BIT | 3, + ANCHOR = EPSILON_BIT | 4, + + /* Tree type, these are used only by tree. */ + CONCAT = 16, + SUBEXP = 17, + + /* Token type, these are used only by token. 
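The bitset macros above pack one bit per byte value, BITSET_WORD_BITS bits to a word. A short usage sketch written against those macros (make_ident_set and is_word_byte are illustrative names; the real word_char set is filled in when the pattern is compiled):

static void
make_ident_set (bitset_t set)
{
  int ch;
  bitset_empty (set);
  for (ch = 0; ch < SBC_MAX; ch++)
    if (isalnum (ch) || ch == '_')
      bitset_set (set, ch);
}

static int
is_word_byte (const bitset_t set, unsigned char ch)
{
  return bitset_contain (set, ch) != 0;
}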
*/ + OP_DUP_PLUS = 18, + OP_DUP_QUESTION, + OP_OPEN_BRACKET, + OP_CLOSE_BRACKET, + OP_CHARSET_RANGE, + OP_OPEN_DUP_NUM, + OP_CLOSE_DUP_NUM, + OP_NON_MATCH_LIST, + OP_OPEN_COLL_ELEM, + OP_CLOSE_COLL_ELEM, + OP_OPEN_EQUIV_CLASS, + OP_CLOSE_EQUIV_CLASS, + OP_OPEN_CHAR_CLASS, + OP_CLOSE_CHAR_CLASS, + OP_WORD, + OP_NOTWORD, + OP_SPACE, + OP_NOTSPACE, + BACK_SLASH + +} re_token_type_t; + +#ifdef RE_ENABLE_I18N +typedef struct +{ + /* Multibyte characters. */ + wchar_t *mbchars; + + /* Collating symbols. */ +# ifdef _LIBC + int32_t *coll_syms; +# endif + + /* Equivalence classes. */ +# ifdef _LIBC + int32_t *equiv_classes; +# endif + + /* Range expressions. */ +# ifdef _LIBC + uint32_t *range_starts; + uint32_t *range_ends; +# else /* not _LIBC */ + wchar_t *range_starts; + wchar_t *range_ends; +# endif /* not _LIBC */ + + /* Character classes. */ + wctype_t *char_classes; + + /* If this character set is the non-matching list. */ + unsigned int non_match : 1; + + /* # of multibyte characters. */ + int nmbchars; + + /* # of collating symbols. */ + int ncoll_syms; + + /* # of equivalence classes. */ + int nequiv_classes; + + /* # of range expressions. */ + int nranges; + + /* # of character classes. */ + int nchar_classes; +} re_charset_t; +#endif /* RE_ENABLE_I18N */ + +typedef struct +{ + union + { + unsigned char c; /* for CHARACTER */ + re_bitset_ptr_t sbcset; /* for SIMPLE_BRACKET */ +#ifdef RE_ENABLE_I18N + re_charset_t *mbcset; /* for COMPLEX_BRACKET */ +#endif /* RE_ENABLE_I18N */ + int idx; /* for BACK_REF */ + re_context_type ctx_type; /* for ANCHOR */ + } opr; +#if __GNUC__ >= 2 + re_token_type_t type : 8; +#else + re_token_type_t type; +#endif + unsigned int constraint : 10; /* context constraint */ + unsigned int duplicated : 1; + unsigned int opt_subexp : 1; +#ifdef RE_ENABLE_I18N + unsigned int accept_mb : 1; + /* These 2 bits can be moved into the union if needed (e.g. if running out + of bits; move opr.c to opr.c.c and move the flags to opr.c.flags). */ + unsigned int mb_partial : 1; +#endif + unsigned int word_char : 1; +} re_token_t; + +#define IS_EPSILON_NODE(type) ((type) & EPSILON_BIT) + +struct re_string_t +{ + /* Indicate the raw buffer which is the original string passed as an + argument of regexec(), re_search(), etc.. */ + const unsigned char *raw_mbs; + /* Store the multibyte string. In case of "case insensitive mode" like + REG_ICASE, upper cases of the string are stored, otherwise MBS points + the same address that RAW_MBS points. */ + unsigned char *mbs; +#ifdef RE_ENABLE_I18N + /* Store the wide character string which is corresponding to MBS. */ + wint_t *wcs; + int *offsets; + mbstate_t cur_state; +#endif + /* Index in RAW_MBS. Each character mbs[i] corresponds to + raw_mbs[raw_mbs_idx + i]. */ + int raw_mbs_idx; + /* The length of the valid characters in the buffers. */ + int valid_len; + /* The corresponding number of bytes in raw_mbs array. */ + int valid_raw_len; + /* The length of the buffers MBS and WCS. */ + int bufs_len; + /* The index in MBS, which is updated by re_string_fetch_byte. */ + int cur_idx; + /* length of RAW_MBS array. */ + int raw_len; + /* This is RAW_LEN - RAW_MBS_IDX + VALID_LEN - VALID_RAW_LEN. */ + int len; + /* End of the buffer may be shorter than its length in the cases such + as re_match_2, re_search_2. Then, we use STOP for end of the buffer + instead of LEN. */ + int raw_stop; + /* This is RAW_STOP - RAW_MBS_IDX adjusted through OFFSETS. */ + int stop; + + /* The context of mbs[0]. 
We store the context independently, since + the context of mbs[0] may be different from raw_mbs[0], which is + the beginning of the input string. */ + unsigned int tip_context; + /* The translation passed as a part of an argument of re_compile_pattern. */ + RE_TRANSLATE_TYPE trans; + /* Copy of re_dfa_t's word_char. */ + re_const_bitset_ptr_t word_char; + /* 1 if REG_ICASE. */ + unsigned char icase; + unsigned char is_utf8; + unsigned char map_notascii; + unsigned char mbs_allocated; + unsigned char offsets_needed; + unsigned char newline_anchor; + unsigned char word_ops_used; + int mb_cur_max; +}; +typedef struct re_string_t re_string_t; + + +struct re_dfa_t; +typedef struct re_dfa_t re_dfa_t; + +#ifndef _LIBC +# ifdef __i386__ +# define internal_function __attribute ((regparm (3), stdcall)) +# else +# define internal_function +# endif +#endif + +#ifndef NOT_IN_libc +static reg_errcode_t re_string_realloc_buffers (re_string_t *pstr, + int new_buf_len) + internal_function; +# ifdef RE_ENABLE_I18N +static void build_wcs_buffer (re_string_t *pstr) internal_function; +static reg_errcode_t build_wcs_upper_buffer (re_string_t *pstr) + internal_function; +# endif /* RE_ENABLE_I18N */ +static void build_upper_buffer (re_string_t *pstr) internal_function; +static void re_string_translate_buffer (re_string_t *pstr) internal_function; +static unsigned int re_string_context_at (const re_string_t *input, int idx, + int eflags) + internal_function __attribute ((pure)); +#endif +#define re_string_peek_byte(pstr, offset) \ + ((pstr)->mbs[(pstr)->cur_idx + offset]) +#define re_string_fetch_byte(pstr) \ + ((pstr)->mbs[(pstr)->cur_idx++]) +#define re_string_first_byte(pstr, idx) \ + ((idx) == (pstr)->valid_len || (pstr)->wcs[idx] != WEOF) +#define re_string_is_single_byte_char(pstr, idx) \ + ((pstr)->wcs[idx] != WEOF && ((pstr)->valid_len == (idx) + 1 \ + || (pstr)->wcs[(idx) + 1] != WEOF)) +#define re_string_eoi(pstr) ((pstr)->stop <= (pstr)->cur_idx) +#define re_string_cur_idx(pstr) ((pstr)->cur_idx) +#define re_string_get_buffer(pstr) ((pstr)->mbs) +#define re_string_length(pstr) ((pstr)->len) +#define re_string_byte_at(pstr,idx) ((pstr)->mbs[idx]) +#define re_string_skip_bytes(pstr,idx) ((pstr)->cur_idx += (idx)) +#define re_string_set_index(pstr,idx) ((pstr)->cur_idx = (idx)) + +#ifndef _LIBC +# if HAVE_ALLOCA +# if (_MSC_VER) +# include <malloc.h> +# define __libc_use_alloca(n) 0 +# else +# include <alloca.h> +/* The OS usually guarantees only one guard page at the bottom of the stack, + and a page size can be as small as 4096 bytes. So we cannot safely + allocate anything larger than 4096 bytes. Also care for the possibility + of a few compiler-allocated temporary stack slots. */ +# define __libc_use_alloca(n) ((n) < 4032) +# endif +# else +/* alloca is implemented with malloc, so just use malloc. */ +# define __libc_use_alloca(n) 0 +# endif +#endif + +#define re_malloc(t,n) ((t *) malloc ((n) * sizeof (t))) +/* SunOS 4.1.x realloc doesn't accept null pointers: pre-Standard C. Sigh. */ +#define re_realloc(p,t,n) ((p != NULL) ? (t *) realloc (p,(n)*sizeof(t)) : (t *) calloc(n,sizeof(t))) +#define re_free(p) free (p) + +struct bin_tree_t +{ + struct bin_tree_t *parent; + struct bin_tree_t *left; + struct bin_tree_t *right; + struct bin_tree_t *first; + struct bin_tree_t *next; + + re_token_t token; + + /* `node_idx' is the index in dfa->nodes, if `type' == 0. + Otherwise `type' indicate the type of this node. 
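The re_realloc macro above carries a portability point worth spelling out: pre-standard reallocs (the SunOS 4.1.x remark) rejected NULL pointers, so the first allocation is routed through calloc. A sketch of the growth idiom these files use with it (grow_ints is an illustrative name):

static int *
grow_ints (int *arr, int *alloc, int needed)
{
  if (needed > *alloc)
    {
      int new_alloc = needed * 2;
      int *tmp = re_realloc (arr, int, new_alloc);      /* ARR may be NULL */
      if (tmp == NULL)
        return NULL;            /* caller still owns the old ARR */
      arr = tmp;
      *alloc = new_alloc;
    }
  return arr;
}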
*/ + int node_idx; +}; +typedef struct bin_tree_t bin_tree_t; + +#define BIN_TREE_STORAGE_SIZE \ + ((1024 - sizeof (void *)) / sizeof (bin_tree_t)) + +struct bin_tree_storage_t +{ + struct bin_tree_storage_t *next; + bin_tree_t data[BIN_TREE_STORAGE_SIZE]; +}; +typedef struct bin_tree_storage_t bin_tree_storage_t; + +#define CONTEXT_WORD 1 +#define CONTEXT_NEWLINE (CONTEXT_WORD << 1) +#define CONTEXT_BEGBUF (CONTEXT_NEWLINE << 1) +#define CONTEXT_ENDBUF (CONTEXT_BEGBUF << 1) + +#define IS_WORD_CONTEXT(c) ((c) & CONTEXT_WORD) +#define IS_NEWLINE_CONTEXT(c) ((c) & CONTEXT_NEWLINE) +#define IS_BEGBUF_CONTEXT(c) ((c) & CONTEXT_BEGBUF) +#define IS_ENDBUF_CONTEXT(c) ((c) & CONTEXT_ENDBUF) +#define IS_ORDINARY_CONTEXT(c) ((c) == 0) + +#define IS_WORD_CHAR(ch) (isalnum (ch) || (ch) == '_') +#define IS_NEWLINE(ch) ((ch) == NEWLINE_CHAR) +#define IS_WIDE_WORD_CHAR(ch) (iswalnum (ch) || (ch) == L'_') +#define IS_WIDE_NEWLINE(ch) ((ch) == WIDE_NEWLINE_CHAR) + +#define NOT_SATISFY_PREV_CONSTRAINT(constraint,context) \ + ((((constraint) & PREV_WORD_CONSTRAINT) && !IS_WORD_CONTEXT (context)) \ + || ((constraint & PREV_NOTWORD_CONSTRAINT) && IS_WORD_CONTEXT (context)) \ + || ((constraint & PREV_NEWLINE_CONSTRAINT) && !IS_NEWLINE_CONTEXT (context))\ + || ((constraint & PREV_BEGBUF_CONSTRAINT) && !IS_BEGBUF_CONTEXT (context))) + +#define NOT_SATISFY_NEXT_CONSTRAINT(constraint,context) \ + ((((constraint) & NEXT_WORD_CONSTRAINT) && !IS_WORD_CONTEXT (context)) \ + || (((constraint) & NEXT_NOTWORD_CONSTRAINT) && IS_WORD_CONTEXT (context)) \ + || (((constraint) & NEXT_NEWLINE_CONSTRAINT) && !IS_NEWLINE_CONTEXT (context)) \ + || (((constraint) & NEXT_ENDBUF_CONSTRAINT) && !IS_ENDBUF_CONTEXT (context))) + +struct re_dfastate_t +{ + unsigned int hash; + re_node_set nodes; + re_node_set non_eps_nodes; + re_node_set inveclosure; + re_node_set *entrance_nodes; + struct re_dfastate_t **trtable, **word_trtable; + unsigned int context : 4; + unsigned int halt : 1; + /* If this state can accept `multi byte'. + Note that we refer to multibyte characters, and multi character + collating elements as `multi byte'. */ + unsigned int accept_mb : 1; + /* If this state has backreference node(s). */ + unsigned int has_backref : 1; + unsigned int has_constraint : 1; +}; +typedef struct re_dfastate_t re_dfastate_t; + +struct re_state_table_entry +{ + int num; + int alloc; + re_dfastate_t **array; +}; + +/* Array type used in re_sub_match_last_t and re_sub_match_top_t. */ + +typedef struct +{ + int next_idx; + int alloc; + re_dfastate_t **array; +} state_array_t; + +/* Store information about the node NODE whose type is OP_CLOSE_SUBEXP. */ + +typedef struct +{ + int node; + int str_idx; /* The position NODE match at. */ + state_array_t path; +} re_sub_match_last_t; + +/* Store information about the node NODE whose type is OP_OPEN_SUBEXP. + And information about the node, whose type is OP_CLOSE_SUBEXP, + corresponding to NODE is stored in LASTS. */ + +typedef struct +{ + int str_idx; + int node; + state_array_t *path; + int alasts; /* Allocation size of LASTS. */ + int nlasts; /* The number of LASTS. */ + re_sub_match_last_t **lasts; +} re_sub_match_top_t; + +struct re_backref_cache_entry +{ + int node; + int str_idx; + int subexp_from; + int subexp_to; + char more; + char unused; + unsigned short int eps_reachable_subexps_map; +}; + +typedef struct +{ + /* The string object corresponding to the input string. 
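The constraint tests above combine an anchor's requirement bits with the context word computed by re_string_context_at. For instance, a word-start anchor such as \< carries WORD_FIRST, whose PREV_NOTWORD_CONSTRAINT bit makes NOT_SATISFY_PREV_CONSTRAINT reject any context that has CONTEXT_WORD set. A one-function sketch (word_first_ok is an illustrative name):

static int
word_first_ok (unsigned int prev_context)
{
  /* \< may match only where the preceding character is not a word
     character, i.e. where CONTEXT_WORD is absent.  */
  return !NOT_SATISFY_PREV_CONSTRAINT (WORD_FIRST, prev_context);
}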
*/ + re_string_t input; +#if defined _LIBC || (defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L) + const re_dfa_t *const dfa; +#else + const re_dfa_t *dfa; +#endif + /* EFLAGS of the argument of regexec. */ + int eflags; + /* Where the matching ends. */ + int match_last; + int last_node; + /* The state log used by the matcher. */ + re_dfastate_t **state_log; + int state_log_top; + /* Back reference cache. */ + int nbkref_ents; + int abkref_ents; + struct re_backref_cache_entry *bkref_ents; + int max_mb_elem_len; + int nsub_tops; + int asub_tops; + re_sub_match_top_t **sub_tops; +} re_match_context_t; + +typedef struct +{ + re_dfastate_t **sifted_states; + re_dfastate_t **limited_states; + int last_node; + int last_str_idx; + re_node_set limits; +} re_sift_context_t; + +struct re_fail_stack_ent_t +{ + int idx; + int node; + regmatch_t *regs; + re_node_set eps_via_nodes; +}; + +struct re_fail_stack_t +{ + int num; + int alloc; + struct re_fail_stack_ent_t *stack; +}; + +struct re_dfa_t +{ + re_token_t *nodes; + size_t nodes_alloc; + size_t nodes_len; + int *nexts; + int *org_indices; + re_node_set *edests; + re_node_set *eclosures; + re_node_set *inveclosures; + struct re_state_table_entry *state_table; + re_dfastate_t *init_state; + re_dfastate_t *init_state_word; + re_dfastate_t *init_state_nl; + re_dfastate_t *init_state_begbuf; + bin_tree_t *str_tree; + bin_tree_storage_t *str_tree_storage; + re_bitset_ptr_t sb_char; + int str_tree_storage_idx; + + /* number of subexpressions `re_nsub' is in regex_t. */ + unsigned int state_hash_mask; + int init_node; + int nbackref; /* The number of backreference in this dfa. */ + + /* Bitmap expressing which backreference is used. */ + bitset_word_t used_bkref_map; + bitset_word_t completed_bkref_map; + + unsigned int has_plural_match : 1; + /* If this dfa has "multibyte node", which is a backreference or + a node which can accept multibyte character or multi character + collating element. */ + unsigned int has_mb_node : 1; + unsigned int is_utf8 : 1; + unsigned int map_notascii : 1; + unsigned int word_ops_used : 1; + int mb_cur_max; + bitset_t word_char; + reg_syntax_t syntax; + int *subexp_map; +#ifdef DEBUG + char* re_str; +#endif +#if defined _LIBC + __libc_lock_define (, lock) +#endif +}; + +#define re_node_set_init_empty(set) memset (set, '\0', sizeof (re_node_set)) +#define re_node_set_remove(set,id) \ + (re_node_set_remove_at (set, re_node_set_contains (set, id) - 1)) +#define re_node_set_empty(p) ((p)->nelem = 0) +#define re_node_set_free(set) re_free ((set)->elems) + + +typedef enum +{ + SB_CHAR, + MB_CHAR, + EQUIV_CLASS, + COLL_SYM, + CHAR_CLASS +} bracket_elem_type; + +typedef struct +{ + bracket_elem_type type; + union + { + unsigned char ch; + unsigned char *name; + wchar_t wch; + } opr; +} bracket_elem_t; + + +/* Inline functions for bitset operation. */ +static inline void +bitset_not (bitset_t set) +{ + int bitset_i; + for (bitset_i = 0; bitset_i < BITSET_WORDS; ++bitset_i) + set[bitset_i] = ~set[bitset_i]; +} + +static inline void +bitset_merge (bitset_t dest, const bitset_t src) +{ + int bitset_i; + for (bitset_i = 0; bitset_i < BITSET_WORDS; ++bitset_i) + dest[bitset_i] |= src[bitset_i]; +} + +static inline void +bitset_mask (bitset_t dest, const bitset_t src) +{ + int bitset_i; + for (bitset_i = 0; bitset_i < BITSET_WORDS; ++bitset_i) + dest[bitset_i] &= src[bitset_i]; +} + +#ifdef RE_ENABLE_I18N +/* Inline functions for re_string. 
*/ +static inline int +internal_function __attribute ((pure)) +re_string_char_size_at (const re_string_t *pstr, int idx) +{ + int byte_idx; + if (pstr->mb_cur_max == 1) + return 1; + for (byte_idx = 1; idx + byte_idx < pstr->valid_len; ++byte_idx) + if (pstr->wcs[idx + byte_idx] != WEOF) + break; + return byte_idx; +} + +static inline wint_t +internal_function __attribute ((pure)) +re_string_wchar_at (const re_string_t *pstr, int idx) +{ + if (pstr->mb_cur_max == 1) + return (wint_t) pstr->mbs[idx]; + return (wint_t) pstr->wcs[idx]; +} + +# ifndef NOT_IN_libc +static int +internal_function __attribute ((pure)) +re_string_elem_size_at (const re_string_t *pstr, int idx) +{ +# ifdef _LIBC + const unsigned char *p, *extra; + const int32_t *table, *indirect; + int32_t tmp; +# include <locale/weight.h> + uint_fast32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); + + if (nrules != 0) + { + table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEMB); + extra = (const unsigned char *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAMB); + indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, + _NL_COLLATE_INDIRECTMB); + p = pstr->mbs + idx; + tmp = findidx (&p); + return p - pstr->mbs - idx; + } + else +# endif /* _LIBC */ + return 1; +} +# endif +#endif /* RE_ENABLE_I18N */ + +#endif /* _REGEX_INTERNAL_H */ diff --git a/compat/regex/regexec.c b/compat/regex/regexec.c new file mode 100644 index 0000000000..eb5e1d4439 --- /dev/null +++ b/compat/regex/regexec.c @@ -0,0 +1,4369 @@ +/* Extended regular expression matching and search library. + Copyright (C) 2002-2005, 2007, 2009, 2010 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Isamu Hasegawa <isamu@yamato.ibm.com>. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301 USA. 
*/ + +static reg_errcode_t match_ctx_init (re_match_context_t *cache, int eflags, + int n) internal_function; +static void match_ctx_clean (re_match_context_t *mctx) internal_function; +static void match_ctx_free (re_match_context_t *cache) internal_function; +static reg_errcode_t match_ctx_add_entry (re_match_context_t *cache, int node, + int str_idx, int from, int to) + internal_function; +static int search_cur_bkref_entry (const re_match_context_t *mctx, int str_idx) + internal_function; +static reg_errcode_t match_ctx_add_subtop (re_match_context_t *mctx, int node, + int str_idx) internal_function; +static re_sub_match_last_t * match_ctx_add_sublast (re_sub_match_top_t *subtop, + int node, int str_idx) + internal_function; +static void sift_ctx_init (re_sift_context_t *sctx, re_dfastate_t **sifted_sts, + re_dfastate_t **limited_sts, int last_node, + int last_str_idx) + internal_function; +static reg_errcode_t re_search_internal (const regex_t *preg, + const char *string, int length, + int start, int range, int stop, + size_t nmatch, regmatch_t pmatch[], + int eflags); +static int re_search_2_stub (struct re_pattern_buffer *bufp, + const char *string1, int length1, + const char *string2, int length2, + int start, int range, struct re_registers *regs, + int stop, int ret_len); +static int re_search_stub (struct re_pattern_buffer *bufp, + const char *string, int length, int start, + int range, int stop, struct re_registers *regs, + int ret_len); +static unsigned re_copy_regs (struct re_registers *regs, regmatch_t *pmatch, + int nregs, int regs_allocated); +static reg_errcode_t prune_impossible_nodes (re_match_context_t *mctx); +static int check_matching (re_match_context_t *mctx, int fl_longest_match, + int *p_match_first) internal_function; +static int check_halt_state_context (const re_match_context_t *mctx, + const re_dfastate_t *state, int idx) + internal_function; +static void update_regs (const re_dfa_t *dfa, regmatch_t *pmatch, + regmatch_t *prev_idx_match, int cur_node, + int cur_idx, int nmatch) internal_function; +static reg_errcode_t push_fail_stack (struct re_fail_stack_t *fs, + int str_idx, int dest_node, int nregs, + regmatch_t *regs, + re_node_set *eps_via_nodes) + internal_function; +static reg_errcode_t set_regs (const regex_t *preg, + const re_match_context_t *mctx, + size_t nmatch, regmatch_t *pmatch, + int fl_backtrack) internal_function; +static reg_errcode_t free_fail_stack_return (struct re_fail_stack_t *fs) + internal_function; + +#ifdef RE_ENABLE_I18N +static int sift_states_iter_mb (const re_match_context_t *mctx, + re_sift_context_t *sctx, + int node_idx, int str_idx, int max_str_idx) + internal_function; +#endif /* RE_ENABLE_I18N */ +static reg_errcode_t sift_states_backward (const re_match_context_t *mctx, + re_sift_context_t *sctx) + internal_function; +static reg_errcode_t build_sifted_states (const re_match_context_t *mctx, + re_sift_context_t *sctx, int str_idx, + re_node_set *cur_dest) + internal_function; +static reg_errcode_t update_cur_sifted_state (const re_match_context_t *mctx, + re_sift_context_t *sctx, + int str_idx, + re_node_set *dest_nodes) + internal_function; +static reg_errcode_t add_epsilon_src_nodes (const re_dfa_t *dfa, + re_node_set *dest_nodes, + const re_node_set *candidates) + internal_function; +static int check_dst_limits (const re_match_context_t *mctx, + re_node_set *limits, + int dst_node, int dst_idx, int src_node, + int src_idx) internal_function; +static int check_dst_limits_calc_pos_1 (const re_match_context_t *mctx, + int 
boundaries, int subexp_idx, + int from_node, int bkref_idx) + internal_function; +static int check_dst_limits_calc_pos (const re_match_context_t *mctx, + int limit, int subexp_idx, + int node, int str_idx, + int bkref_idx) internal_function; +static reg_errcode_t check_subexp_limits (const re_dfa_t *dfa, + re_node_set *dest_nodes, + const re_node_set *candidates, + re_node_set *limits, + struct re_backref_cache_entry *bkref_ents, + int str_idx) internal_function; +static reg_errcode_t sift_states_bkref (const re_match_context_t *mctx, + re_sift_context_t *sctx, + int str_idx, const re_node_set *candidates) + internal_function; +static reg_errcode_t merge_state_array (const re_dfa_t *dfa, + re_dfastate_t **dst, + re_dfastate_t **src, int num) + internal_function; +static re_dfastate_t *find_recover_state (reg_errcode_t *err, + re_match_context_t *mctx) internal_function; +static re_dfastate_t *transit_state (reg_errcode_t *err, + re_match_context_t *mctx, + re_dfastate_t *state) internal_function; +static re_dfastate_t *merge_state_with_log (reg_errcode_t *err, + re_match_context_t *mctx, + re_dfastate_t *next_state) + internal_function; +static reg_errcode_t check_subexp_matching_top (re_match_context_t *mctx, + re_node_set *cur_nodes, + int str_idx) internal_function; +#if 0 +static re_dfastate_t *transit_state_sb (reg_errcode_t *err, + re_match_context_t *mctx, + re_dfastate_t *pstate) + internal_function; +#endif +#ifdef RE_ENABLE_I18N +static reg_errcode_t transit_state_mb (re_match_context_t *mctx, + re_dfastate_t *pstate) + internal_function; +#endif /* RE_ENABLE_I18N */ +static reg_errcode_t transit_state_bkref (re_match_context_t *mctx, + const re_node_set *nodes) + internal_function; +static reg_errcode_t get_subexp (re_match_context_t *mctx, + int bkref_node, int bkref_str_idx) + internal_function; +static reg_errcode_t get_subexp_sub (re_match_context_t *mctx, + const re_sub_match_top_t *sub_top, + re_sub_match_last_t *sub_last, + int bkref_node, int bkref_str) + internal_function; +static int find_subexp_node (const re_dfa_t *dfa, const re_node_set *nodes, + int subexp_idx, int type) internal_function; +static reg_errcode_t check_arrival (re_match_context_t *mctx, + state_array_t *path, int top_node, + int top_str, int last_node, int last_str, + int type) internal_function; +static reg_errcode_t check_arrival_add_next_nodes (re_match_context_t *mctx, + int str_idx, + re_node_set *cur_nodes, + re_node_set *next_nodes) + internal_function; +static reg_errcode_t check_arrival_expand_ecl (const re_dfa_t *dfa, + re_node_set *cur_nodes, + int ex_subexp, int type) + internal_function; +static reg_errcode_t check_arrival_expand_ecl_sub (const re_dfa_t *dfa, + re_node_set *dst_nodes, + int target, int ex_subexp, + int type) internal_function; +static reg_errcode_t expand_bkref_cache (re_match_context_t *mctx, + re_node_set *cur_nodes, int cur_str, + int subexp_num, int type) + internal_function; +static int build_trtable (const re_dfa_t *dfa, + re_dfastate_t *state) internal_function; +#ifdef RE_ENABLE_I18N +static int check_node_accept_bytes (const re_dfa_t *dfa, int node_idx, + const re_string_t *input, int idx) + internal_function; +# ifdef _LIBC +static unsigned int find_collation_sequence_value (const unsigned char *mbs, + size_t name_len) + internal_function; +# endif /* _LIBC */ +#endif /* RE_ENABLE_I18N */ +static int group_nodes_into_DFAstates (const re_dfa_t *dfa, + const re_dfastate_t *state, + re_node_set *states_node, + bitset_t *states_ch) internal_function; +static int 
check_node_accept (const re_match_context_t *mctx,
+				   const re_token_t *node, int idx)
+     internal_function;
+static reg_errcode_t extend_buffers (re_match_context_t *mctx)
+     internal_function;
+
+/* Entry point for POSIX code. */
+
+/* regexec searches for a given pattern, specified by PREG, in the
+   string STRING.
+
+   If NMATCH is zero or REG_NOSUB was set in the cflags argument to
+   `regcomp', we ignore PMATCH.  Otherwise, we assume PMATCH has at
+   least NMATCH elements, and we set them to the offsets of the
+   corresponding matched substrings.
+
+   EFLAGS specifies `execution flags' which affect matching: if
+   REG_NOTBOL is set, then ^ does not match at the beginning of the
+   string; if REG_NOTEOL is set, then $ does not match at the end.
+
+   We return 0 if we find a match and REG_NOMATCH if not. */
+
+int
+regexec (
+	const regex_t *__restrict preg,
+	const char *__restrict string,
+	size_t nmatch,
+	regmatch_t pmatch[],
+	int eflags)
+{
+  reg_errcode_t err;
+  int start, length;
+
+  if (eflags & ~(REG_NOTBOL | REG_NOTEOL | REG_STARTEND))
+    return REG_BADPAT;
+
+  if (eflags & REG_STARTEND)
+    {
+      start = pmatch[0].rm_so;
+      length = pmatch[0].rm_eo;
+    }
+  else
+    {
+      start = 0;
+      length = strlen (string);
+    }
+
+  __libc_lock_lock (dfa->lock);
+  if (preg->no_sub)
+    err = re_search_internal (preg, string, length, start, length - start,
+			      length, 0, NULL, eflags);
+  else
+    err = re_search_internal (preg, string, length, start, length - start,
+			      length, nmatch, pmatch, eflags);
+  __libc_lock_unlock (dfa->lock);
+  return err != REG_NOERROR;
+}
+
+#ifdef _LIBC
+# include <shlib-compat.h>
+versioned_symbol (libc, __regexec, regexec, GLIBC_2_3_4);
+
+# if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_3_4)
+__typeof__ (__regexec) __compat_regexec;
+
+int
+attribute_compat_text_section
+__compat_regexec (const regex_t *__restrict preg,
+		  const char *__restrict string, size_t nmatch,
+		  regmatch_t pmatch[], int eflags)
+{
+  return regexec (preg, string, nmatch, pmatch,
+		  eflags & (REG_NOTBOL | REG_NOTEOL));
+}
+compat_symbol (libc, __compat_regexec, regexec, GLIBC_2_0);
+# endif
+#endif
+
+/* Entry points for GNU code. */
+
+/* re_match, re_search, re_match_2, re_search_2
+
+   The former two functions operate on STRING with length LENGTH,
+   while the latter two operate on the concatenation of STRING1 and
+   STRING2 with lengths LENGTH1 and LENGTH2, respectively.
+
+   re_match() matches the compiled pattern in BUFP against the string,
+   starting at index START.
+
+   re_search() first tries matching at index START, then it tries to match
+   starting from index START + 1, and so on.  The last start position tried
+   is START + RANGE.  (Thus RANGE = 0 forces re_search to operate the same
+   way as re_match().)
+
+   The parameter STOP of re_{match,search}_2 specifies that no match exceeding
+   the first STOP characters of the concatenation of the strings should be
+   considered.
+
+   If REGS is not NULL, and BUFP->no_sub is not set, the offsets of the match
+   and all groups are stored in REGS.  (For the "_2" variants, the offsets are
+   computed relative to the concatenation, not relative to the individual
+   strings.)
+
+   On success, the re_match* functions return the length of the match, and
+   the re_search* functions return the position of the start of the match.
+   A return value of -1 means no match was found, and -2 indicates an
+   internal error.
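+
+   Illustrative sketch (editorial addition, not upstream text; the
+   pattern and strings are hypothetical, and re_syntax_options must be
+   set before compiling):
+
+     struct re_pattern_buffer buf;
+     struct re_registers regs;
+     memset (&buf, 0, sizeof buf);
+     re_syntax_options = RE_SYNTAX_POSIX_EXTENDED;
+     if (re_compile_pattern ("a(b*)c", 6, &buf) == NULL
+	 && re_search (&buf, "xxabbc", 6, 0, 6, &regs) == 2)
+       {
+	 /* regs.start[0] == 2, regs.end[0] == 6 delimit the whole match;
+	    regs.start[1] == 3, regs.end[1] == 5 delimit the "b*" group.  */
+       }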
*/ + +int +re_match (struct re_pattern_buffer *bufp, + const char *string, + int length, + int start, + struct re_registers *regs) +{ + return re_search_stub (bufp, string, length, start, 0, length, regs, 1); +} +#ifdef _LIBC +weak_alias (__re_match, re_match) +#endif + +int +re_search (struct re_pattern_buffer *bufp, + const char *string, + int length, int start, int range, + struct re_registers *regs) +{ + return re_search_stub (bufp, string, length, start, range, length, regs, 0); +} +#ifdef _LIBC +weak_alias (__re_search, re_search) +#endif + +int +re_match_2 (struct re_pattern_buffer *bufp, + const char *string1, int length1, + const char *string2, int length2, int start, + struct re_registers *regs, int stop) +{ + return re_search_2_stub (bufp, string1, length1, string2, length2, + start, 0, regs, stop, 1); +} +#ifdef _LIBC +weak_alias (__re_match_2, re_match_2) +#endif + +int +re_search_2 (struct re_pattern_buffer *bufp, + const char *string1, int length1, + const char *string2, int length2, int start, + int range, struct re_registers *regs, int stop) +{ + return re_search_2_stub (bufp, string1, length1, string2, length2, + start, range, regs, stop, 0); +} +#ifdef _LIBC +weak_alias (__re_search_2, re_search_2) +#endif + +static int +re_search_2_stub (struct re_pattern_buffer *bufp, + const char *string1, int length1, + const char *string2, int length2, int start, + int range, struct re_registers *regs, + int stop, int ret_len) +{ + const char *str; + int rval; + int len = length1 + length2; + int free_str = 0; + + if (BE (length1 < 0 || length2 < 0 || stop < 0, 0)) + return -2; + + /* Concatenate the strings. */ + if (length2 > 0) + if (length1 > 0) + { + char *s = re_malloc (char, len); + + if (BE (s == NULL, 0)) + return -2; + memcpy (s, string1, length1); + memcpy (s + length1, string2, length2); + str = s; + free_str = 1; + } + else + str = string2; + else + str = string1; + + rval = re_search_stub (bufp, str, len, start, range, stop, regs, ret_len); + if (free_str) + re_free ((char *) str); + return rval; +} + +/* The parameters have the same meaning as those of re_search. + Additional parameters: + If RET_LEN is nonzero the length of the match is returned (re_match style); + otherwise the position of the match is returned. */ + +static int +re_search_stub (struct re_pattern_buffer *bufp, + const char *string, int length, int start, + int range, int stop, + struct re_registers *regs, int ret_len) +{ + reg_errcode_t result; + regmatch_t *pmatch; + int nregs, rval; + int eflags = 0; + + /* Check for out-of-range. */ + if (BE (start < 0 || start > length, 0)) + return -1; + if (BE (start + range > length, 0)) + range = length - start; + else if (BE (start + range < 0, 0)) + range = -start; + + __libc_lock_lock (dfa->lock); + + eflags |= (bufp->not_bol) ? REG_NOTBOL : 0; + eflags |= (bufp->not_eol) ? REG_NOTEOL : 0; + + /* Compile fastmap if we haven't yet. */ + if (range > 0 && bufp->fastmap != NULL && !bufp->fastmap_accurate) + re_compile_fastmap (bufp); + + if (BE (bufp->no_sub, 0)) + regs = NULL; + + /* We need at least 1 register. */ + if (regs == NULL) + nregs = 1; + else if (BE (bufp->regs_allocated == REGS_FIXED && + regs->num_regs < bufp->re_nsub + 1, 0)) + { + nregs = regs->num_regs; + if (BE (nregs < 1, 0)) + { + /* Nothing can be copied to regs. 
*/ + regs = NULL; + nregs = 1; + } + } + else + nregs = bufp->re_nsub + 1; + pmatch = re_malloc (regmatch_t, nregs); + if (BE (pmatch == NULL, 0)) + { + rval = -2; + goto out; + } + + result = re_search_internal (bufp, string, length, start, range, stop, + nregs, pmatch, eflags); + + rval = 0; + + /* I hope we needn't fill their regs with -1's when no match was found. */ + if (result != REG_NOERROR) + rval = -1; + else if (regs != NULL) + { + /* If caller wants register contents data back, copy them. */ + bufp->regs_allocated = re_copy_regs (regs, pmatch, nregs, + bufp->regs_allocated); + if (BE (bufp->regs_allocated == REGS_UNALLOCATED, 0)) + rval = -2; + } + + if (BE (rval == 0, 1)) + { + if (ret_len) + { + assert (pmatch[0].rm_so == start); + rval = pmatch[0].rm_eo - start; + } + else + rval = pmatch[0].rm_so; + } + re_free (pmatch); + out: + __libc_lock_unlock (dfa->lock); + return rval; +} + +static unsigned +re_copy_regs (struct re_registers *regs, + regmatch_t *pmatch, + int nregs, int regs_allocated) +{ + int rval = REGS_REALLOCATE; + int i; + int need_regs = nregs + 1; + /* We need one extra element beyond `num_regs' for the `-1' marker GNU code + uses. */ + + /* Have the register data arrays been allocated? */ + if (regs_allocated == REGS_UNALLOCATED) + { /* No. So allocate them with malloc. */ + regs->start = re_malloc (regoff_t, need_regs); + if (BE (regs->start == NULL, 0)) + return REGS_UNALLOCATED; + regs->end = re_malloc (regoff_t, need_regs); + if (BE (regs->end == NULL, 0)) + { + re_free (regs->start); + return REGS_UNALLOCATED; + } + regs->num_regs = need_regs; + } + else if (regs_allocated == REGS_REALLOCATE) + { /* Yes. If we need more elements than were already + allocated, reallocate them. If we need fewer, just + leave it alone. */ + if (BE (need_regs > regs->num_regs, 0)) + { + regoff_t *new_start = re_realloc (regs->start, regoff_t, need_regs); + regoff_t *new_end; + if (BE (new_start == NULL, 0)) + return REGS_UNALLOCATED; + new_end = re_realloc (regs->end, regoff_t, need_regs); + if (BE (new_end == NULL, 0)) + { + re_free (new_start); + return REGS_UNALLOCATED; + } + regs->start = new_start; + regs->end = new_end; + regs->num_regs = need_regs; + } + } + else + { + assert (regs_allocated == REGS_FIXED); + /* This function may not be called with REGS_FIXED and nregs too big. */ + assert (regs->num_regs >= nregs); + rval = REGS_FIXED; + } + + /* Copy the regs. */ + for (i = 0; i < nregs; ++i) + { + regs->start[i] = pmatch[i].rm_so; + regs->end[i] = pmatch[i].rm_eo; + } + for ( ; i < regs->num_regs; ++i) + regs->start[i] = regs->end[i] = -1; + + return rval; +} + +/* Set REGS to hold NUM_REGS registers, storing them in STARTS and + ENDS. Subsequent matches using PATTERN_BUFFER and REGS will use + this memory for recording register information. STARTS and ENDS + must be allocated using the malloc library routine, and must each + be at least NUM_REGS * sizeof (regoff_t) bytes long. + + If NUM_REGS == 0, then subsequent matches should allocate their own + register data. + + Unless this function is called, the first search or match using + PATTERN_BUFFER will allocate its own register data, without + freeing the old data. 
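+
+   Illustrative sketch (editorial addition, not upstream text; BUF and
+   REGS are hypothetical objects, and the pattern is assumed to have
+   one group, hence three registers counting the whole match and the
+   trailing -1 marker slot):
+
+     regoff_t *starts = malloc (3 * sizeof (regoff_t));
+     regoff_t *ends = malloc (3 * sizeof (regoff_t));
+     if (starts != NULL && ends != NULL)
+       re_set_registers (&buf, &regs, 3, starts, ends);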
*/
+
+void
+re_set_registers (struct re_pattern_buffer *bufp,
+		  struct re_registers *regs,
+		  unsigned num_regs,
+		  regoff_t *starts,
+		  regoff_t *ends)
+{
+  if (num_regs)
+    {
+      bufp->regs_allocated = REGS_REALLOCATE;
+      regs->num_regs = num_regs;
+      regs->start = starts;
+      regs->end = ends;
+    }
+  else
+    {
+      bufp->regs_allocated = REGS_UNALLOCATED;
+      regs->num_regs = 0;
+      regs->start = regs->end = (regoff_t *) 0;
+    }
+}
+#ifdef _LIBC
+weak_alias (__re_set_registers, re_set_registers)
+#endif
+
+/* Entry points compatible with 4.2 BSD regex library.  We don't define
+   them unless specifically requested. */
+
+#if defined _REGEX_RE_COMP || defined _LIBC
+int
+# ifdef _LIBC
+weak_function
+# endif
+re_exec (s)
+     const char *s;
+{
+  return 0 == regexec (&re_comp_buf, s, 0, NULL, 0);
+}
+#endif /* _REGEX_RE_COMP */
+
+/* Internal entry point. */
+
+/* Searches for a compiled pattern PREG in the string STRING, whose
+   length is LENGTH.  NMATCH, PMATCH, and EFLAGS have the same
+   meanings as with regexec.  START and RANGE have the same meanings
+   as with re_search.
+   Return REG_NOERROR if we find a match, REG_NOMATCH if not,
+   otherwise return the error code.
+   Note: We assume front-end functions already check ranges.
+   (START + RANGE >= 0 && START + RANGE <= LENGTH) */
+
+static reg_errcode_t
+re_search_internal (const regex_t *preg,
+		    const char *string,
+		    int length, int start, int range, int stop,
+		    size_t nmatch, regmatch_t pmatch[],
+		    int eflags)
+{
+  reg_errcode_t err;
+  const re_dfa_t *dfa = (const re_dfa_t *) preg->buffer;
+  int left_lim, right_lim, incr;
+  int fl_longest_match, match_first, match_kind, match_last = -1;
+  int extra_nmatch;
+  int sb, ch;
+#if defined _LIBC || (defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L)
+  re_match_context_t mctx = { .dfa = dfa };
+#else
+  re_match_context_t mctx;
+#endif
+  char *fastmap = (preg->fastmap != NULL && preg->fastmap_accurate
+		   && range && !preg->can_be_null) ? preg->fastmap : NULL;
+  RE_TRANSLATE_TYPE t = preg->translate;
+
+#if !(defined _LIBC || (defined __STDC_VERSION__ && __STDC_VERSION__ >= 199901L))
+  memset (&mctx, '\0', sizeof (re_match_context_t));
+  mctx.dfa = dfa;
+#endif
+
+  extra_nmatch = (nmatch > preg->re_nsub) ? nmatch - (preg->re_nsub + 1) : 0;
+  nmatch -= extra_nmatch;
+
+  /* Check if the DFA hasn't been compiled. */
+  if (BE (preg->used == 0 || dfa->init_state == NULL
+	  || dfa->init_state_word == NULL || dfa->init_state_nl == NULL
+	  || dfa->init_state_begbuf == NULL, 0))
+    return REG_NOMATCH;
+
+#ifdef DEBUG
+  /* We assume front-end functions already check them. */
+  assert (start + range >= 0 && start + range <= length);
+#endif
+
+  /* If initial states with non-begbuf contexts have no elements,
+     the regex must be anchored.  If preg->newline_anchor is set,
+     we'll never use init_state_nl, so do not check it. */
+  if (dfa->init_state->nodes.nelem == 0
+      && dfa->init_state_word->nodes.nelem == 0
+      && (dfa->init_state_nl->nodes.nelem == 0
+	  || !preg->newline_anchor))
+    {
+      if (start != 0 && start + range != 0)
+	return REG_NOMATCH;
+      start = range = 0;
+    }
+
+  /* We must check for the longest match if nmatch > 0.
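+
+     (Editorial note, not upstream text: POSIX leftmost-longest
+     semantics only matter when subexpression offsets will be reported
+     or when back-references constrain the walk, which is why the next
+     statement enables it exactly when nmatch != 0 or the DFA contains
+     back-references.)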
*/
+  fl_longest_match = (nmatch != 0 || dfa->nbackref);
+
+  err = re_string_allocate (&mctx.input, string, length, dfa->nodes_len + 1,
+			    preg->translate, preg->syntax & RE_ICASE, dfa);
+  if (BE (err != REG_NOERROR, 0))
+    goto free_return;
+  mctx.input.stop = stop;
+  mctx.input.raw_stop = stop;
+  mctx.input.newline_anchor = preg->newline_anchor;
+
+  err = match_ctx_init (&mctx, eflags, dfa->nbackref * 2);
+  if (BE (err != REG_NOERROR, 0))
+    goto free_return;
+
+  /* We will log all the DFA states through which the dfa passes,
+     if nmatch > 1, or if this dfa has a "multibyte node", which is a
+     back-reference or a node which can accept a multibyte character or
+     a multi-character collating element. */
+  if (nmatch > 1 || dfa->has_mb_node)
+    {
+      /* Avoid overflow. */
+      if (BE (SIZE_MAX / sizeof (re_dfastate_t *) <= mctx.input.bufs_len, 0))
+	{
+	  err = REG_ESPACE;
+	  goto free_return;
+	}
+
+      mctx.state_log = re_malloc (re_dfastate_t *, mctx.input.bufs_len + 1);
+      if (BE (mctx.state_log == NULL, 0))
+	{
+	  err = REG_ESPACE;
+	  goto free_return;
+	}
+    }
+  else
+    mctx.state_log = NULL;
+
+  match_first = start;
+  mctx.input.tip_context = (eflags & REG_NOTBOL) ? CONTEXT_BEGBUF
+			   : CONTEXT_NEWLINE | CONTEXT_BEGBUF;
+
+  /* Check incrementally whether or not the input string matches. */
+  incr = (range < 0) ? -1 : 1;
+  left_lim = (range < 0) ? start + range : start;
+  right_lim = (range < 0) ? start : start + range;
+  sb = dfa->mb_cur_max == 1;
+  match_kind =
+    (fastmap
+     ? ((sb || !(preg->syntax & RE_ICASE || t) ? 4 : 0)
+	| (range >= 0 ? 2 : 0)
+	| (t != NULL ? 1 : 0))
+     : 8);
+
+  for (;; match_first += incr)
+    {
+      err = REG_NOMATCH;
+      if (match_first < left_lim || right_lim < match_first)
+	goto free_return;
+
+      /* Advance as rapidly as possible through the string, until we
+	 find a plausible place to start matching.  This may be done
+	 with varying efficiency, so there are various possibilities:
+	 only the most common of them are specialized, in order to
+	 save on code size.  We use a switch statement for speed. */
+      switch (match_kind)
+	{
+	case 8:
+	  /* No fastmap. */
+	  break;
+
+	case 7:
+	  /* Fastmap with single-byte translation, match forward. */
+	  while (BE (match_first < right_lim, 1)
+		 && !fastmap[t[(unsigned char) string[match_first]]])
+	    ++match_first;
+	  goto forward_match_found_start_or_reached_end;
+
+	case 6:
+	  /* Fastmap without translation, match forward. */
+	  while (BE (match_first < right_lim, 1)
+		 && !fastmap[(unsigned char) string[match_first]])
+	    ++match_first;
+
+	forward_match_found_start_or_reached_end:
+	  if (BE (match_first == right_lim, 0))
+	    {
+	      ch = match_first >= length
+		   ? 0 : (unsigned char) string[match_first];
+	      if (!fastmap[t ? t[ch] : ch])
+		goto free_return;
+	    }
+	  break;
+
+	case 4:
+	case 5:
+	  /* Fastmap without multi-byte translation, match backwards. */
+	  while (match_first >= left_lim)
+	    {
+	      ch = match_first >= length
+		   ? 0 : (unsigned char) string[match_first];
+	      if (fastmap[t ? t[ch] : ch])
+		break;
+	      --match_first;
+	    }
+	  if (match_first < left_lim)
+	    goto free_return;
+	  break;
+
+	default:
+	  /* In this case, we can't easily determine the current byte,
+	     since it might be a component byte of a multibyte
+	     character.  So we use the constructed buffer instead. */
+	  for (;;)
+	    {
+	      /* If MATCH_FIRST is out of the valid range, reconstruct the
+		 buffers.
*/
+	      unsigned int offset = match_first - mctx.input.raw_mbs_idx;
+	      if (BE (offset >= (unsigned int) mctx.input.valid_raw_len, 0))
+		{
+		  err = re_string_reconstruct (&mctx.input, match_first,
+					       eflags);
+		  if (BE (err != REG_NOERROR, 0))
+		    goto free_return;
+
+		  offset = match_first - mctx.input.raw_mbs_idx;
+		}
+	      /* If MATCH_FIRST is out of the buffer, leave it as '\0'.
+		 Note that MATCH_FIRST must not be smaller than 0. */
+	      ch = (match_first >= length
+		    ? 0 : re_string_byte_at (&mctx.input, offset));
+	      if (fastmap[ch])
+		break;
+	      match_first += incr;
+	      if (match_first < left_lim || match_first > right_lim)
+		{
+		  err = REG_NOMATCH;
+		  goto free_return;
+		}
+	    }
+	  break;
+	}
+
+      /* Reconstruct the buffers so that the matcher can assume that
+	 the matching starts from the beginning of the buffer. */
+      err = re_string_reconstruct (&mctx.input, match_first, eflags);
+      if (BE (err != REG_NOERROR, 0))
+	goto free_return;
+
+#ifdef RE_ENABLE_I18N
+      /* Don't consider this char as a possible match start if it is part,
+	 yet isn't the head, of a multibyte character. */
+      if (!sb && !re_string_first_byte (&mctx.input, 0))
+	continue;
+#endif
+
+      /* It seems to be an appropriate place, so use the matcher. */
+      /* We assume that the matching starts from 0. */
+      mctx.state_log_top = mctx.nbkref_ents = mctx.max_mb_elem_len = 0;
+      match_last = check_matching (&mctx, fl_longest_match,
+				   range >= 0 ? &match_first : NULL);
+      if (match_last != -1)
+	{
+	  if (BE (match_last == -2, 0))
+	    {
+	      err = REG_ESPACE;
+	      goto free_return;
+	    }
+	  else
+	    {
+	      mctx.match_last = match_last;
+	      if ((!preg->no_sub && nmatch > 1) || dfa->nbackref)
+		{
+		  re_dfastate_t *pstate = mctx.state_log[match_last];
+		  mctx.last_node = check_halt_state_context (&mctx, pstate,
+							     match_last);
+		}
+	      if ((!preg->no_sub && nmatch > 1 && dfa->has_plural_match)
+		  || dfa->nbackref)
+		{
+		  err = prune_impossible_nodes (&mctx);
+		  if (err == REG_NOERROR)
+		    break;
+		  if (BE (err != REG_NOMATCH, 0))
+		    goto free_return;
+		  match_last = -1;
+		}
+	      else
+		break; /* We found a match. */
+	    }
+	}
+
+      match_ctx_clean (&mctx);
+    }
+
+#ifdef DEBUG
+  assert (match_last != -1);
+  assert (err == REG_NOERROR);
+#endif
+
+  /* Set pmatch[] if we need to. */
+  if (nmatch > 0)
+    {
+      int reg_idx;
+
+      /* Initialize registers. */
+      for (reg_idx = 1; reg_idx < nmatch; ++reg_idx)
+	pmatch[reg_idx].rm_so = pmatch[reg_idx].rm_eo = -1;
+
+      /* Set the points where the matching starts/ends. */
+      pmatch[0].rm_so = 0;
+      pmatch[0].rm_eo = mctx.match_last;
+
+      if (!preg->no_sub && nmatch > 1)
+	{
+	  err = set_regs (preg, &mctx, nmatch, pmatch,
+			  dfa->has_plural_match && dfa->nbackref > 0);
+	  if (BE (err != REG_NOERROR, 0))
+	    goto free_return;
+	}
+
+      /* Finally, add the offset to each register, since we slid
+	 the buffers so that we could assume that the matching starts
+	 from 0. */
+      for (reg_idx = 0; reg_idx < nmatch; ++reg_idx)
+	if (pmatch[reg_idx].rm_so != -1)
+	  {
+#ifdef RE_ENABLE_I18N
+	    if (BE (mctx.input.offsets_needed != 0, 0))
+	      {
+		pmatch[reg_idx].rm_so =
+		  (pmatch[reg_idx].rm_so == mctx.input.valid_len
+		   ? mctx.input.valid_raw_len
+		   : mctx.input.offsets[pmatch[reg_idx].rm_so]);
+		pmatch[reg_idx].rm_eo =
+		  (pmatch[reg_idx].rm_eo == mctx.input.valid_len
+		   ?
mctx.input.valid_raw_len + : mctx.input.offsets[pmatch[reg_idx].rm_eo]); + } +#else + assert (mctx.input.offsets_needed == 0); +#endif + pmatch[reg_idx].rm_so += match_first; + pmatch[reg_idx].rm_eo += match_first; + } + for (reg_idx = 0; reg_idx < extra_nmatch; ++reg_idx) + { + pmatch[nmatch + reg_idx].rm_so = -1; + pmatch[nmatch + reg_idx].rm_eo = -1; + } + + if (dfa->subexp_map) + for (reg_idx = 0; reg_idx + 1 < nmatch; reg_idx++) + if (dfa->subexp_map[reg_idx] != reg_idx) + { + pmatch[reg_idx + 1].rm_so + = pmatch[dfa->subexp_map[reg_idx] + 1].rm_so; + pmatch[reg_idx + 1].rm_eo + = pmatch[dfa->subexp_map[reg_idx] + 1].rm_eo; + } + } + + free_return: + re_free (mctx.state_log); + if (dfa->nbackref) + match_ctx_free (&mctx); + re_string_destruct (&mctx.input); + return err; +} + +static reg_errcode_t +prune_impossible_nodes (re_match_context_t *mctx) +{ + const re_dfa_t *const dfa = mctx->dfa; + int halt_node, match_last; + reg_errcode_t ret; + re_dfastate_t **sifted_states; + re_dfastate_t **lim_states = NULL; + re_sift_context_t sctx; +#ifdef DEBUG + assert (mctx->state_log != NULL); +#endif + match_last = mctx->match_last; + halt_node = mctx->last_node; + + /* Avoid overflow. */ + if (BE (SIZE_MAX / sizeof (re_dfastate_t *) <= match_last, 0)) + return REG_ESPACE; + + sifted_states = re_malloc (re_dfastate_t *, match_last + 1); + if (BE (sifted_states == NULL, 0)) + { + ret = REG_ESPACE; + goto free_return; + } + if (dfa->nbackref) + { + lim_states = re_malloc (re_dfastate_t *, match_last + 1); + if (BE (lim_states == NULL, 0)) + { + ret = REG_ESPACE; + goto free_return; + } + while (1) + { + memset (lim_states, '\0', + sizeof (re_dfastate_t *) * (match_last + 1)); + sift_ctx_init (&sctx, sifted_states, lim_states, halt_node, + match_last); + ret = sift_states_backward (mctx, &sctx); + re_node_set_free (&sctx.limits); + if (BE (ret != REG_NOERROR, 0)) + goto free_return; + if (sifted_states[0] != NULL || lim_states[0] != NULL) + break; + do + { + --match_last; + if (match_last < 0) + { + ret = REG_NOMATCH; + goto free_return; + } + } while (mctx->state_log[match_last] == NULL + || !mctx->state_log[match_last]->halt); + halt_node = check_halt_state_context (mctx, + mctx->state_log[match_last], + match_last); + } + ret = merge_state_array (dfa, sifted_states, lim_states, + match_last + 1); + re_free (lim_states); + lim_states = NULL; + if (BE (ret != REG_NOERROR, 0)) + goto free_return; + } + else + { + sift_ctx_init (&sctx, sifted_states, lim_states, halt_node, match_last); + ret = sift_states_backward (mctx, &sctx); + re_node_set_free (&sctx.limits); + if (BE (ret != REG_NOERROR, 0)) + goto free_return; + if (sifted_states[0] == NULL) + { + ret = REG_NOMATCH; + goto free_return; + } + } + re_free (mctx->state_log); + mctx->state_log = sifted_states; + sifted_states = NULL; + mctx->last_node = halt_node; + mctx->match_last = match_last; + ret = REG_NOERROR; + free_return: + re_free (sifted_states); + re_free (lim_states); + return ret; +} + +/* Acquire an initial state and return it. + We must select appropriate initial state depending on the context, + since initial states may have constraints like "\<", "^", etc.. 
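+   (Editorial example, not upstream text: when matching resumes just
+   after a word character, the word-context initial state is chosen,
+   so a leading "\<" cannot fire there; at the very start of the
+   buffer the begbuf+newline state is used instead, which is what
+   lets "^" match.)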
*/
+
+static inline re_dfastate_t *
+__attribute ((always_inline)) internal_function
+acquire_init_state_context (reg_errcode_t *err, const re_match_context_t *mctx,
+			    int idx)
+{
+  const re_dfa_t *const dfa = mctx->dfa;
+  if (dfa->init_state->has_constraint)
+    {
+      unsigned int context;
+      context = re_string_context_at (&mctx->input, idx - 1, mctx->eflags);
+      if (IS_WORD_CONTEXT (context))
+	return dfa->init_state_word;
+      else if (IS_ORDINARY_CONTEXT (context))
+	return dfa->init_state;
+      else if (IS_BEGBUF_CONTEXT (context) && IS_NEWLINE_CONTEXT (context))
+	return dfa->init_state_begbuf;
+      else if (IS_NEWLINE_CONTEXT (context))
+	return dfa->init_state_nl;
+      else if (IS_BEGBUF_CONTEXT (context))
+	{
+	  /* It is a relatively rare case, so calculate it on demand. */
+	  return re_acquire_state_context (err, dfa,
+					   dfa->init_state->entrance_nodes,
+					   context);
+	}
+      else
+	/* Must not happen? */
+	return dfa->init_state;
+    }
+  else
+    return dfa->init_state;
+}
+
+/* Check whether the regular expression matches the input string INPUT
+   or not, and return the index where the matching ends; return -1 if
+   there is no match, or -2 in case of an error.
+   FL_LONGEST_MATCH means we want the POSIX longest matching.
+   If P_MATCH_FIRST is not NULL, and the match fails, it is set to the
+   next place where we may want to try matching.
+   Note that the matcher assumes that the matching starts from the current
+   index of the buffer. */
+
+static int
+internal_function
+check_matching (re_match_context_t *mctx, int fl_longest_match,
+		int *p_match_first)
+{
+  const re_dfa_t *const dfa = mctx->dfa;
+  reg_errcode_t err;
+  int match = 0;
+  int match_last = -1;
+  int cur_str_idx = re_string_cur_idx (&mctx->input);
+  re_dfastate_t *cur_state;
+  int at_init_state = p_match_first != NULL;
+  int next_start_idx = cur_str_idx;
+
+  err = REG_NOERROR;
+  cur_state = acquire_init_state_context (&err, mctx, cur_str_idx);
+  /* An initial state must not be NULL (invalid). */
+  if (BE (cur_state == NULL, 0))
+    {
+      assert (err == REG_ESPACE);
+      return -2;
+    }
+
+  if (mctx->state_log != NULL)
+    {
+      mctx->state_log[cur_str_idx] = cur_state;
+
+      /* Check OP_OPEN_SUBEXP in the initial state in case we use them
+	 later, e.g. when processing back references. */
+      if (BE (dfa->nbackref, 0))
+	{
+	  at_init_state = 0;
+	  err = check_subexp_matching_top (mctx, &cur_state->nodes, 0);
+	  if (BE (err != REG_NOERROR, 0))
+	    return err;
+
+	  if (cur_state->has_backref)
+	    {
+	      err = transit_state_bkref (mctx, &cur_state->nodes);
+	      if (BE (err != REG_NOERROR, 0))
+		return err;
+	    }
+	}
+    }
+
+  /* If the RE accepts the NULL (empty) string. */
+  if (BE (cur_state->halt, 0))
+    {
+      if (!cur_state->has_constraint
+	  || check_halt_state_context (mctx, cur_state, cur_str_idx))
+	{
+	  if (!fl_longest_match)
+	    return cur_str_idx;
+	  else
+	    {
+	      match_last = cur_str_idx;
+	      match = 1;
+	    }
+	}
+    }
+
+  while (!re_string_eoi (&mctx->input))
+    {
+      re_dfastate_t *old_state = cur_state;
+      int next_char_idx = re_string_cur_idx (&mctx->input) + 1;
+
+      if (BE (next_char_idx >= mctx->input.bufs_len, 0)
+	  || (BE (next_char_idx >= mctx->input.valid_len, 0)
+	      && mctx->input.valid_len < mctx->input.len))
+	{
+	  err = extend_buffers (mctx);
+	  if (BE (err != REG_NOERROR, 0))
+	    {
+	      assert (err == REG_ESPACE);
+	      return -2;
+	    }
+	}
+
+      cur_state = transit_state (&err, mctx, cur_state);
+      if (mctx->state_log != NULL)
+	cur_state = merge_state_with_log (&err, mctx, cur_state);
+
+      if (cur_state == NULL)
+	{
+	  /* Reached the invalid state or an error.
+	     Try to recover a valid state using the state log, if
+	     available and if we have not already found a valid (even if
+	     not the longest) match. */
+	  if (BE (err != REG_NOERROR, 0))
+	    return -2;
+
+	  if (mctx->state_log == NULL
+	      || (match && !fl_longest_match)
+	      || (cur_state = find_recover_state (&err, mctx)) == NULL)
+	    break;
+	}
+
+      if (BE (at_init_state, 0))
+	{
+	  if (old_state == cur_state)
+	    next_start_idx = next_char_idx;
+	  else
+	    at_init_state = 0;
+	}
+
+      if (cur_state->halt)
+	{
+	  /* Reached a halt state.
+	     Check whether the halt state can satisfy the current context. */
+	  if (!cur_state->has_constraint
+	      || check_halt_state_context (mctx, cur_state,
+					   re_string_cur_idx (&mctx->input)))
+	    {
+	      /* We found an appropriate halt state. */
+	      match_last = re_string_cur_idx (&mctx->input);
+	      match = 1;
+
+	      /* We found a match, do not modify match_first below. */
+	      p_match_first = NULL;
+	      if (!fl_longest_match)
+		break;
+	    }
+	}
+    }
+
+  if (p_match_first)
+    *p_match_first += next_start_idx;
+
+  return match_last;
+}
+
+/* Check whether NODE matches the current context. */
+
+static int
+internal_function
+check_halt_node_context (const re_dfa_t *dfa, int node, unsigned int context)
+{
+  re_token_type_t type = dfa->nodes[node].type;
+  unsigned int constraint = dfa->nodes[node].constraint;
+  if (type != END_OF_RE)
+    return 0;
+  if (!constraint)
+    return 1;
+  if (NOT_SATISFY_NEXT_CONSTRAINT (constraint, context))
+    return 0;
+  return 1;
+}
+
+/* Check whether the halt state STATE matches the current context.
+   Return 0 if it does not match; if one of STATE's nodes is a halt
+   node and matches the context, return that node. */
+
+static int
+internal_function
+check_halt_state_context (const re_match_context_t *mctx,
+			  const re_dfastate_t *state, int idx)
+{
+  int i;
+  unsigned int context;
+#ifdef DEBUG
+  assert (state->halt);
+#endif
+  context = re_string_context_at (&mctx->input, idx, mctx->eflags);
+  for (i = 0; i < state->nodes.nelem; ++i)
+    if (check_halt_node_context (mctx->dfa, state->nodes.elems[i], context))
+      return state->nodes.elems[i];
+  return 0;
+}
+
+/* Compute the next node to which the "NFA" transits from NODE (the
+   "NFA" is the NFA corresponding to the DFA).
+   Return the destination node and update EPS_VIA_NODES; return -1 if
+   no valid destination is found, or -2 in case of errors. */
+
+static int
+internal_function
+proceed_next_node (const re_match_context_t *mctx, int nregs, regmatch_t *regs,
+		   int *pidx, int node, re_node_set *eps_via_nodes,
+		   struct re_fail_stack_t *fs)
+{
+  const re_dfa_t *const dfa = mctx->dfa;
+  int i, err;
+  if (IS_EPSILON_NODE (dfa->nodes[node].type))
+    {
+      re_node_set *cur_nodes = &mctx->state_log[*pidx]->nodes;
+      re_node_set *edests = &dfa->edests[node];
+      int dest_node;
+      err = re_node_set_insert (eps_via_nodes, node);
+      if (BE (err < 0, 0))
+	return -2;
+      /* Pick up a valid destination, or return -1 if none is found. */
+      for (dest_node = -1, i = 0; i < edests->nelem; ++i)
+	{
+	  int candidate = edests->elems[i];
+	  if (!re_node_set_contains (cur_nodes, candidate))
+	    continue;
+	  if (dest_node == -1)
+	    dest_node = candidate;
+
+	  else
+	    {
+	      /* In order to avoid an infinite loop like "(a*)*", return
+		 the second epsilon-transition if the first was already
+		 considered. */
+	      if (re_node_set_contains (eps_via_nodes, dest_node))
+		return candidate;
+
+	      /* Otherwise, push the second epsilon-transition on the fail
+		 stack. */
+	      else if (fs != NULL
+		       && push_fail_stack (fs, *pidx, candidate, nregs, regs,
+					   eps_via_nodes))
+		return -2;
+
+	      /* We know we are going to exit.
*/ + break; + } + } + return dest_node; + } + else + { + int naccepted = 0; + re_token_type_t type = dfa->nodes[node].type; + +#ifdef RE_ENABLE_I18N + if (dfa->nodes[node].accept_mb) + naccepted = check_node_accept_bytes (dfa, node, &mctx->input, *pidx); + else +#endif /* RE_ENABLE_I18N */ + if (type == OP_BACK_REF) + { + int subexp_idx = dfa->nodes[node].opr.idx + 1; + naccepted = regs[subexp_idx].rm_eo - regs[subexp_idx].rm_so; + if (fs != NULL) + { + if (regs[subexp_idx].rm_so == -1 || regs[subexp_idx].rm_eo == -1) + return -1; + else if (naccepted) + { + char *buf = (char *) re_string_get_buffer (&mctx->input); + if (memcmp (buf + regs[subexp_idx].rm_so, buf + *pidx, + naccepted) != 0) + return -1; + } + } + + if (naccepted == 0) + { + int dest_node; + err = re_node_set_insert (eps_via_nodes, node); + if (BE (err < 0, 0)) + return -2; + dest_node = dfa->edests[node].elems[0]; + if (re_node_set_contains (&mctx->state_log[*pidx]->nodes, + dest_node)) + return dest_node; + } + } + + if (naccepted != 0 + || check_node_accept (mctx, dfa->nodes + node, *pidx)) + { + int dest_node = dfa->nexts[node]; + *pidx = (naccepted == 0) ? *pidx + 1 : *pidx + naccepted; + if (fs && (*pidx > mctx->match_last || mctx->state_log[*pidx] == NULL + || !re_node_set_contains (&mctx->state_log[*pidx]->nodes, + dest_node))) + return -1; + re_node_set_empty (eps_via_nodes); + return dest_node; + } + } + return -1; +} + +static reg_errcode_t +internal_function +push_fail_stack (struct re_fail_stack_t *fs, int str_idx, int dest_node, + int nregs, regmatch_t *regs, re_node_set *eps_via_nodes) +{ + reg_errcode_t err; + int num = fs->num++; + if (fs->num == fs->alloc) + { + struct re_fail_stack_ent_t *new_array; + new_array = realloc (fs->stack, (sizeof (struct re_fail_stack_ent_t) + * fs->alloc * 2)); + if (new_array == NULL) + return REG_ESPACE; + fs->alloc *= 2; + fs->stack = new_array; + } + fs->stack[num].idx = str_idx; + fs->stack[num].node = dest_node; + fs->stack[num].regs = re_malloc (regmatch_t, nregs); + if (fs->stack[num].regs == NULL) + return REG_ESPACE; + memcpy (fs->stack[num].regs, regs, sizeof (regmatch_t) * nregs); + err = re_node_set_init_copy (&fs->stack[num].eps_via_nodes, eps_via_nodes); + return err; +} + +static int +internal_function +pop_fail_stack (struct re_fail_stack_t *fs, int *pidx, int nregs, + regmatch_t *regs, re_node_set *eps_via_nodes) +{ + int num = --fs->num; + assert (num >= 0); + *pidx = fs->stack[num].idx; + memcpy (regs, fs->stack[num].regs, sizeof (regmatch_t) * nregs); + re_node_set_free (eps_via_nodes); + re_free (fs->stack[num].regs); + *eps_via_nodes = fs->stack[num].eps_via_nodes; + return fs->stack[num].node; +} + +/* Set the positions where the subexpressions are starts/ends to registers + PMATCH. + Note: We assume that pmatch[0] is already set, and + pmatch[i].rm_so == pmatch[i].rm_eo == -1 for 0 < i < nmatch. 
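+
+   (Editorial note, not upstream text: when FL_BACKTRACK is set, the
+   walk below may commit to a wrong epsilon branch; push_fail_stack()
+   above snapshots the registers at each such choice point and
+   pop_fail_stack() restores them, so a dead end in a pattern like
+   "(a*)*b" can be unwound cleanly.)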
*/ + +static reg_errcode_t +internal_function +set_regs (const regex_t *preg, const re_match_context_t *mctx, size_t nmatch, + regmatch_t *pmatch, int fl_backtrack) +{ + const re_dfa_t *dfa = (const re_dfa_t *) preg->buffer; + int idx, cur_node; + re_node_set eps_via_nodes; + struct re_fail_stack_t *fs; + struct re_fail_stack_t fs_body = { 0, 2, NULL }; + regmatch_t *prev_idx_match; + int prev_idx_match_malloced = 0; + +#ifdef DEBUG + assert (nmatch > 1); + assert (mctx->state_log != NULL); +#endif + if (fl_backtrack) + { + fs = &fs_body; + fs->stack = re_malloc (struct re_fail_stack_ent_t, fs->alloc); + if (fs->stack == NULL) + return REG_ESPACE; + } + else + fs = NULL; + + cur_node = dfa->init_node; + re_node_set_init_empty (&eps_via_nodes); + +#ifdef HAVE_ALLOCA + if (__libc_use_alloca (nmatch * sizeof (regmatch_t))) + prev_idx_match = (regmatch_t *) alloca (nmatch * sizeof (regmatch_t)); + else +#endif + { + prev_idx_match = re_malloc (regmatch_t, nmatch); + if (prev_idx_match == NULL) + { + free_fail_stack_return (fs); + return REG_ESPACE; + } + prev_idx_match_malloced = 1; + } + memcpy (prev_idx_match, pmatch, sizeof (regmatch_t) * nmatch); + + for (idx = pmatch[0].rm_so; idx <= pmatch[0].rm_eo ;) + { + update_regs (dfa, pmatch, prev_idx_match, cur_node, idx, nmatch); + + if (idx == pmatch[0].rm_eo && cur_node == mctx->last_node) + { + int reg_idx; + if (fs) + { + for (reg_idx = 0; reg_idx < nmatch; ++reg_idx) + if (pmatch[reg_idx].rm_so > -1 && pmatch[reg_idx].rm_eo == -1) + break; + if (reg_idx == nmatch) + { + re_node_set_free (&eps_via_nodes); + if (prev_idx_match_malloced) + re_free (prev_idx_match); + return free_fail_stack_return (fs); + } + cur_node = pop_fail_stack (fs, &idx, nmatch, pmatch, + &eps_via_nodes); + } + else + { + re_node_set_free (&eps_via_nodes); + if (prev_idx_match_malloced) + re_free (prev_idx_match); + return REG_NOERROR; + } + } + + /* Proceed to next node. */ + cur_node = proceed_next_node (mctx, nmatch, pmatch, &idx, cur_node, + &eps_via_nodes, fs); + + if (BE (cur_node < 0, 0)) + { + if (BE (cur_node == -2, 0)) + { + re_node_set_free (&eps_via_nodes); + if (prev_idx_match_malloced) + re_free (prev_idx_match); + free_fail_stack_return (fs); + return REG_ESPACE; + } + if (fs) + cur_node = pop_fail_stack (fs, &idx, nmatch, pmatch, + &eps_via_nodes); + else + { + re_node_set_free (&eps_via_nodes); + if (prev_idx_match_malloced) + re_free (prev_idx_match); + return REG_NOMATCH; + } + } + } + re_node_set_free (&eps_via_nodes); + if (prev_idx_match_malloced) + re_free (prev_idx_match); + return free_fail_stack_return (fs); +} + +static reg_errcode_t +internal_function +free_fail_stack_return (struct re_fail_stack_t *fs) +{ + if (fs) + { + int fs_idx; + for (fs_idx = 0; fs_idx < fs->num; ++fs_idx) + { + re_node_set_free (&fs->stack[fs_idx].eps_via_nodes); + re_free (fs->stack[fs_idx].regs); + } + re_free (fs->stack); + } + return REG_NOERROR; +} + +static void +internal_function +update_regs (const re_dfa_t *dfa, regmatch_t *pmatch, + regmatch_t *prev_idx_match, int cur_node, int cur_idx, int nmatch) +{ + int type = dfa->nodes[cur_node].type; + if (type == OP_OPEN_SUBEXP) + { + int reg_num = dfa->nodes[cur_node].opr.idx + 1; + + /* We are at the first node of this sub expression. */ + if (reg_num < nmatch) + { + pmatch[reg_num].rm_so = cur_idx; + pmatch[reg_num].rm_eo = -1; + } + } + else if (type == OP_CLOSE_SUBEXP) + { + int reg_num = dfa->nodes[cur_node].opr.idx + 1; + if (reg_num < nmatch) + { + /* We are at the last node of this sub expression. 
*/
+	  if (pmatch[reg_num].rm_so < cur_idx)
+	    {
+	      pmatch[reg_num].rm_eo = cur_idx;
+	      /* This is a non-empty match or we are not inside an optional
+		 subexpression.  Accept this right away. */
+	      memcpy (prev_idx_match, pmatch, sizeof (regmatch_t) * nmatch);
+	    }
+	  else
+	    {
+	      if (dfa->nodes[cur_node].opt_subexp
+		  && prev_idx_match[reg_num].rm_so != -1)
+		/* We transited through an empty match for an optional
+		   subexpression, like (a?)*, and this is not the subexp's
+		   first match.  Copy back the old content of the registers
+		   so that matches of an inner subexpression are undone as
+		   well, like in ((a?))*. */
+		memcpy (pmatch, prev_idx_match, sizeof (regmatch_t) * nmatch);
+	      else
+		/* We completed a subexpression, but it may be part of
+		   an optional one, so do not update PREV_IDX_MATCH. */
+		pmatch[reg_num].rm_eo = cur_idx;
+	    }
+	}
+    }
+}
+
+/* This function checks the STATE_LOG from SCTX->last_str_idx down to 0
+   and sifts the nodes in each state according to the following rules.
+   The updated state_log will be written to STATE_LOG.
+
+   Rules: We throw away the node `a' in the STATE_LOG[STR_IDX] if...
+   1. When STR_IDX == MATCH_LAST (the last index in the state_log):
+	If `a' isn't the LAST_NODE and `a' can't epsilon transit to
+	the LAST_NODE, we throw away the node `a'.
+   2. When 0 <= STR_IDX < MATCH_LAST and `a' accepts
+	string `s' and transits to `b':
+	i. If 'b' isn't in the STATE_LOG[STR_IDX+strlen('s')], we throw
+	   away the node `a'.
+	ii. If 'b' is in the STATE_LOG[STR_IDX+strlen('s')] but 'b' is
+	    thrown away, we throw away the node `a'.
+   3. When 0 <= STR_IDX < MATCH_LAST and 'a' epsilon transits to 'b':
+	i. If 'b' isn't in the STATE_LOG[STR_IDX], we throw away the
+	   node `a'.
+	ii. If 'b' is in the STATE_LOG[STR_IDX] but 'b' is thrown away,
+	    we throw away the node `a'. */
+
+#define STATE_NODE_CONTAINS(state,node) \
+  ((state) != NULL && re_node_set_contains (&(state)->nodes, node))
+
+static reg_errcode_t
+internal_function
+sift_states_backward (const re_match_context_t *mctx, re_sift_context_t *sctx)
+{
+  reg_errcode_t err;
+  int null_cnt = 0;
+  int str_idx = sctx->last_str_idx;
+  re_node_set cur_dest;
+
+#ifdef DEBUG
+  assert (mctx->state_log != NULL && mctx->state_log[str_idx] != NULL);
+#endif
+
+  /* Build the sifted state_log[str_idx].  It has the nodes which can
+     epsilon transit to the last_node and the last_node itself. */
+  err = re_node_set_init_1 (&cur_dest, sctx->last_node);
+  if (BE (err != REG_NOERROR, 0))
+    return err;
+  err = update_cur_sifted_state (mctx, sctx, str_idx, &cur_dest);
+  if (BE (err != REG_NOERROR, 0))
+    goto free_return;
+
+  /* Then check each state in the state_log. */
+  while (str_idx > 0)
+    {
+      /* Update counters. */
+      null_cnt = (sctx->sifted_states[str_idx] == NULL) ? null_cnt + 1 : 0;
+      if (null_cnt > mctx->max_mb_elem_len)
+	{
+	  memset (sctx->sifted_states, '\0',
+		  sizeof (re_dfastate_t *) * str_idx);
+	  re_node_set_free (&cur_dest);
+	  return REG_NOERROR;
+	}
+      re_node_set_empty (&cur_dest);
+      --str_idx;
+
+      if (mctx->state_log[str_idx])
+	{
+	  err = build_sifted_states (mctx, sctx, str_idx, &cur_dest);
+	  if (BE (err != REG_NOERROR, 0))
+	    goto free_return;
+	}
+
+      /* Add all the nodes which satisfy the following conditions:
+	 - It can epsilon transit to a node in CUR_DEST.
+	 - It is in CUR_SRC.
+	 And update state_log.
*/ + err = update_cur_sifted_state (mctx, sctx, str_idx, &cur_dest); + if (BE (err != REG_NOERROR, 0)) + goto free_return; + } + err = REG_NOERROR; + free_return: + re_node_set_free (&cur_dest); + return err; +} + +static reg_errcode_t +internal_function +build_sifted_states (const re_match_context_t *mctx, re_sift_context_t *sctx, + int str_idx, re_node_set *cur_dest) +{ + const re_dfa_t *const dfa = mctx->dfa; + const re_node_set *cur_src = &mctx->state_log[str_idx]->non_eps_nodes; + int i; + + /* Then build the next sifted state. + We build the next sifted state on `cur_dest', and update + `sifted_states[str_idx]' with `cur_dest'. + Note: + `cur_dest' is the sifted state from `state_log[str_idx + 1]'. + `cur_src' points the node_set of the old `state_log[str_idx]' + (with the epsilon nodes pre-filtered out). */ + for (i = 0; i < cur_src->nelem; i++) + { + int prev_node = cur_src->elems[i]; + int naccepted = 0; + int ret; + +#ifdef DEBUG + re_token_type_t type = dfa->nodes[prev_node].type; + assert (!IS_EPSILON_NODE (type)); +#endif +#ifdef RE_ENABLE_I18N + /* If the node may accept `multi byte'. */ + if (dfa->nodes[prev_node].accept_mb) + naccepted = sift_states_iter_mb (mctx, sctx, prev_node, + str_idx, sctx->last_str_idx); +#endif /* RE_ENABLE_I18N */ + + /* We don't check backreferences here. + See update_cur_sifted_state(). */ + if (!naccepted + && check_node_accept (mctx, dfa->nodes + prev_node, str_idx) + && STATE_NODE_CONTAINS (sctx->sifted_states[str_idx + 1], + dfa->nexts[prev_node])) + naccepted = 1; + + if (naccepted == 0) + continue; + + if (sctx->limits.nelem) + { + int to_idx = str_idx + naccepted; + if (check_dst_limits (mctx, &sctx->limits, + dfa->nexts[prev_node], to_idx, + prev_node, str_idx)) + continue; + } + ret = re_node_set_insert (cur_dest, prev_node); + if (BE (ret == -1, 0)) + return REG_ESPACE; + } + + return REG_NOERROR; +} + +/* Helper functions. */ + +static reg_errcode_t +internal_function +clean_state_log_if_needed (re_match_context_t *mctx, int next_state_log_idx) +{ + int top = mctx->state_log_top; + + if (next_state_log_idx >= mctx->input.bufs_len + || (next_state_log_idx >= mctx->input.valid_len + && mctx->input.valid_len < mctx->input.len)) + { + reg_errcode_t err; + err = extend_buffers (mctx); + if (BE (err != REG_NOERROR, 0)) + return err; + } + + if (top < next_state_log_idx) + { + memset (mctx->state_log + top + 1, '\0', + sizeof (re_dfastate_t *) * (next_state_log_idx - top)); + mctx->state_log_top = next_state_log_idx; + } + return REG_NOERROR; +} + +static reg_errcode_t +internal_function +merge_state_array (const re_dfa_t *dfa, re_dfastate_t **dst, + re_dfastate_t **src, int num) +{ + int st_idx; + reg_errcode_t err; + for (st_idx = 0; st_idx < num; ++st_idx) + { + if (dst[st_idx] == NULL) + dst[st_idx] = src[st_idx]; + else if (src[st_idx] != NULL) + { + re_node_set merged_set; + err = re_node_set_init_union (&merged_set, &dst[st_idx]->nodes, + &src[st_idx]->nodes); + if (BE (err != REG_NOERROR, 0)) + return err; + dst[st_idx] = re_acquire_state (&err, dfa, &merged_set); + re_node_set_free (&merged_set); + if (BE (err != REG_NOERROR, 0)) + return err; + } + } + return REG_NOERROR; +} + +static reg_errcode_t +internal_function +update_cur_sifted_state (const re_match_context_t *mctx, + re_sift_context_t *sctx, int str_idx, + re_node_set *dest_nodes) +{ + const re_dfa_t *const dfa = mctx->dfa; + reg_errcode_t err = REG_NOERROR; + const re_node_set *candidates; + candidates = ((mctx->state_log[str_idx] == NULL) ? 
NULL + : &mctx->state_log[str_idx]->nodes); + + if (dest_nodes->nelem == 0) + sctx->sifted_states[str_idx] = NULL; + else + { + if (candidates) + { + /* At first, add the nodes which can epsilon transit to a node in + DEST_NODE. */ + err = add_epsilon_src_nodes (dfa, dest_nodes, candidates); + if (BE (err != REG_NOERROR, 0)) + return err; + + /* Then, check the limitations in the current sift_context. */ + if (sctx->limits.nelem) + { + err = check_subexp_limits (dfa, dest_nodes, candidates, &sctx->limits, + mctx->bkref_ents, str_idx); + if (BE (err != REG_NOERROR, 0)) + return err; + } + } + + sctx->sifted_states[str_idx] = re_acquire_state (&err, dfa, dest_nodes); + if (BE (err != REG_NOERROR, 0)) + return err; + } + + if (candidates && mctx->state_log[str_idx]->has_backref) + { + err = sift_states_bkref (mctx, sctx, str_idx, candidates); + if (BE (err != REG_NOERROR, 0)) + return err; + } + return REG_NOERROR; +} + +static reg_errcode_t +internal_function +add_epsilon_src_nodes (const re_dfa_t *dfa, re_node_set *dest_nodes, + const re_node_set *candidates) +{ + reg_errcode_t err = REG_NOERROR; + int i; + + re_dfastate_t *state = re_acquire_state (&err, dfa, dest_nodes); + if (BE (err != REG_NOERROR, 0)) + return err; + + if (!state->inveclosure.alloc) + { + err = re_node_set_alloc (&state->inveclosure, dest_nodes->nelem); + if (BE (err != REG_NOERROR, 0)) + return REG_ESPACE; + for (i = 0; i < dest_nodes->nelem; i++) + { + err = re_node_set_merge (&state->inveclosure, + dfa->inveclosures + dest_nodes->elems[i]); + if (BE (err != REG_NOERROR, 0)) + return REG_ESPACE; + } + } + return re_node_set_add_intersect (dest_nodes, candidates, + &state->inveclosure); +} + +static reg_errcode_t +internal_function +sub_epsilon_src_nodes (const re_dfa_t *dfa, int node, re_node_set *dest_nodes, + const re_node_set *candidates) +{ + int ecl_idx; + reg_errcode_t err; + re_node_set *inv_eclosure = dfa->inveclosures + node; + re_node_set except_nodes; + re_node_set_init_empty (&except_nodes); + for (ecl_idx = 0; ecl_idx < inv_eclosure->nelem; ++ecl_idx) + { + int cur_node = inv_eclosure->elems[ecl_idx]; + if (cur_node == node) + continue; + if (IS_EPSILON_NODE (dfa->nodes[cur_node].type)) + { + int edst1 = dfa->edests[cur_node].elems[0]; + int edst2 = ((dfa->edests[cur_node].nelem > 1) + ? 
dfa->edests[cur_node].elems[1] : -1); + if ((!re_node_set_contains (inv_eclosure, edst1) + && re_node_set_contains (dest_nodes, edst1)) + || (edst2 > 0 + && !re_node_set_contains (inv_eclosure, edst2) + && re_node_set_contains (dest_nodes, edst2))) + { + err = re_node_set_add_intersect (&except_nodes, candidates, + dfa->inveclosures + cur_node); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&except_nodes); + return err; + } + } + } + } + for (ecl_idx = 0; ecl_idx < inv_eclosure->nelem; ++ecl_idx) + { + int cur_node = inv_eclosure->elems[ecl_idx]; + if (!re_node_set_contains (&except_nodes, cur_node)) + { + int idx = re_node_set_contains (dest_nodes, cur_node) - 1; + re_node_set_remove_at (dest_nodes, idx); + } + } + re_node_set_free (&except_nodes); + return REG_NOERROR; +} + +static int +internal_function +check_dst_limits (const re_match_context_t *mctx, re_node_set *limits, + int dst_node, int dst_idx, int src_node, int src_idx) +{ + const re_dfa_t *const dfa = mctx->dfa; + int lim_idx, src_pos, dst_pos; + + int dst_bkref_idx = search_cur_bkref_entry (mctx, dst_idx); + int src_bkref_idx = search_cur_bkref_entry (mctx, src_idx); + for (lim_idx = 0; lim_idx < limits->nelem; ++lim_idx) + { + int subexp_idx; + struct re_backref_cache_entry *ent; + ent = mctx->bkref_ents + limits->elems[lim_idx]; + subexp_idx = dfa->nodes[ent->node].opr.idx; + + dst_pos = check_dst_limits_calc_pos (mctx, limits->elems[lim_idx], + subexp_idx, dst_node, dst_idx, + dst_bkref_idx); + src_pos = check_dst_limits_calc_pos (mctx, limits->elems[lim_idx], + subexp_idx, src_node, src_idx, + src_bkref_idx); + + /* In case of: + <src> <dst> ( <subexp> ) + ( <subexp> ) <src> <dst> + ( <subexp1> <src> <subexp2> <dst> <subexp3> ) */ + if (src_pos == dst_pos) + continue; /* This is unrelated limitation. */ + else + return 1; + } + return 0; +} + +static int +internal_function +check_dst_limits_calc_pos_1 (const re_match_context_t *mctx, int boundaries, + int subexp_idx, int from_node, int bkref_idx) +{ + const re_dfa_t *const dfa = mctx->dfa; + const re_node_set *eclosures = dfa->eclosures + from_node; + int node_idx; + + /* Else, we are on the boundary: examine the nodes on the epsilon + closure. */ + for (node_idx = 0; node_idx < eclosures->nelem; ++node_idx) + { + int node = eclosures->elems[node_idx]; + switch (dfa->nodes[node].type) + { + case OP_BACK_REF: + if (bkref_idx != -1) + { + struct re_backref_cache_entry *ent = mctx->bkref_ents + bkref_idx; + do + { + int dst, cpos; + + if (ent->node != node) + continue; + + if (subexp_idx < BITSET_WORD_BITS + && !(ent->eps_reachable_subexps_map + & ((bitset_word_t) 1 << subexp_idx))) + continue; + + /* Recurse trying to reach the OP_OPEN_SUBEXP and + OP_CLOSE_SUBEXP cases below. 
But, if the + destination node is the same node as the source + node, don't recurse because it would cause an + infinite loop: a regex that exhibits this behavior + is ()\1*\1* */ + dst = dfa->edests[node].elems[0]; + if (dst == from_node) + { + if (boundaries & 1) + return -1; + else /* if (boundaries & 2) */ + return 0; + } + + cpos = + check_dst_limits_calc_pos_1 (mctx, boundaries, subexp_idx, + dst, bkref_idx); + if (cpos == -1 /* && (boundaries & 1) */) + return -1; + if (cpos == 0 && (boundaries & 2)) + return 0; + + if (subexp_idx < BITSET_WORD_BITS) + ent->eps_reachable_subexps_map + &= ~((bitset_word_t) 1 << subexp_idx); + } + while (ent++->more); + } + break; + + case OP_OPEN_SUBEXP: + if ((boundaries & 1) && subexp_idx == dfa->nodes[node].opr.idx) + return -1; + break; + + case OP_CLOSE_SUBEXP: + if ((boundaries & 2) && subexp_idx == dfa->nodes[node].opr.idx) + return 0; + break; + + default: + break; + } + } + + return (boundaries & 2) ? 1 : 0; +} + +static int +internal_function +check_dst_limits_calc_pos (const re_match_context_t *mctx, int limit, + int subexp_idx, int from_node, int str_idx, + int bkref_idx) +{ + struct re_backref_cache_entry *lim = mctx->bkref_ents + limit; + int boundaries; + + /* If we are outside the range of the subexpression, return -1 or 1. */ + if (str_idx < lim->subexp_from) + return -1; + + if (lim->subexp_to < str_idx) + return 1; + + /* If we are within the subexpression, return 0. */ + boundaries = (str_idx == lim->subexp_from); + boundaries |= (str_idx == lim->subexp_to) << 1; + if (boundaries == 0) + return 0; + + /* Else, examine epsilon closure. */ + return check_dst_limits_calc_pos_1 (mctx, boundaries, subexp_idx, + from_node, bkref_idx); +} + +/* Check the limitations of sub expressions LIMITS, and remove the nodes + which are against limitations from DEST_NODES. */ + +static reg_errcode_t +internal_function +check_subexp_limits (const re_dfa_t *dfa, re_node_set *dest_nodes, + const re_node_set *candidates, re_node_set *limits, + struct re_backref_cache_entry *bkref_ents, int str_idx) +{ + reg_errcode_t err; + int node_idx, lim_idx; + + for (lim_idx = 0; lim_idx < limits->nelem; ++lim_idx) + { + int subexp_idx; + struct re_backref_cache_entry *ent; + ent = bkref_ents + limits->elems[lim_idx]; + + if (str_idx <= ent->subexp_from || ent->str_idx < str_idx) + continue; /* This is unrelated limitation. */ + + subexp_idx = dfa->nodes[ent->node].opr.idx; + if (ent->subexp_to == str_idx) + { + int ops_node = -1; + int cls_node = -1; + for (node_idx = 0; node_idx < dest_nodes->nelem; ++node_idx) + { + int node = dest_nodes->elems[node_idx]; + re_token_type_t type = dfa->nodes[node].type; + if (type == OP_OPEN_SUBEXP + && subexp_idx == dfa->nodes[node].opr.idx) + ops_node = node; + else if (type == OP_CLOSE_SUBEXP + && subexp_idx == dfa->nodes[node].opr.idx) + cls_node = node; + } + + /* Check the limitation of the open subexpression. */ + /* Note that (ent->subexp_to = str_idx != ent->subexp_from). */ + if (ops_node >= 0) + { + err = sub_epsilon_src_nodes (dfa, ops_node, dest_nodes, + candidates); + if (BE (err != REG_NOERROR, 0)) + return err; + } + + /* Check the limitation of the close subexpression. */ + if (cls_node >= 0) + for (node_idx = 0; node_idx < dest_nodes->nelem; ++node_idx) + { + int node = dest_nodes->elems[node_idx]; + if (!re_node_set_contains (dfa->inveclosures + node, + cls_node) + && !re_node_set_contains (dfa->eclosures + node, + cls_node)) + { + /* It is against this limitation. + Remove it form the current sifted state. 
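+	       Such a node can neither reach the OP_CLOSE_SUBEXP node
+	       through its epsilon closure nor be reached from it, so it
+	       cannot take part in a match that closes this
+	       subexpression at STR_IDX.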
*/ + err = sub_epsilon_src_nodes (dfa, node, dest_nodes, + candidates); + if (BE (err != REG_NOERROR, 0)) + return err; + --node_idx; + } + } + } + else /* (ent->subexp_to != str_idx) */ + { + for (node_idx = 0; node_idx < dest_nodes->nelem; ++node_idx) + { + int node = dest_nodes->elems[node_idx]; + re_token_type_t type = dfa->nodes[node].type; + if (type == OP_CLOSE_SUBEXP || type == OP_OPEN_SUBEXP) + { + if (subexp_idx != dfa->nodes[node].opr.idx) + continue; + /* It is against this limitation. + Remove it form the current sifted state. */ + err = sub_epsilon_src_nodes (dfa, node, dest_nodes, + candidates); + if (BE (err != REG_NOERROR, 0)) + return err; + } + } + } + } + return REG_NOERROR; +} + +static reg_errcode_t +internal_function +sift_states_bkref (const re_match_context_t *mctx, re_sift_context_t *sctx, + int str_idx, const re_node_set *candidates) +{ + const re_dfa_t *const dfa = mctx->dfa; + reg_errcode_t err; + int node_idx, node; + re_sift_context_t local_sctx; + int first_idx = search_cur_bkref_entry (mctx, str_idx); + + if (first_idx == -1) + return REG_NOERROR; + + local_sctx.sifted_states = NULL; /* Mark that it hasn't been initialized. */ + + for (node_idx = 0; node_idx < candidates->nelem; ++node_idx) + { + int enabled_idx; + re_token_type_t type; + struct re_backref_cache_entry *entry; + node = candidates->elems[node_idx]; + type = dfa->nodes[node].type; + /* Avoid infinite loop for the REs like "()\1+". */ + if (node == sctx->last_node && str_idx == sctx->last_str_idx) + continue; + if (type != OP_BACK_REF) + continue; + + entry = mctx->bkref_ents + first_idx; + enabled_idx = first_idx; + do + { + int subexp_len; + int to_idx; + int dst_node; + int ret; + re_dfastate_t *cur_state; + + if (entry->node != node) + continue; + subexp_len = entry->subexp_to - entry->subexp_from; + to_idx = str_idx + subexp_len; + dst_node = (subexp_len ? dfa->nexts[node] + : dfa->edests[node].elems[0]); + + if (to_idx > sctx->last_str_idx + || sctx->sifted_states[to_idx] == NULL + || !STATE_NODE_CONTAINS (sctx->sifted_states[to_idx], dst_node) + || check_dst_limits (mctx, &sctx->limits, node, + str_idx, dst_node, to_idx)) + continue; + + if (local_sctx.sifted_states == NULL) + { + local_sctx = *sctx; + err = re_node_set_init_copy (&local_sctx.limits, &sctx->limits); + if (BE (err != REG_NOERROR, 0)) + goto free_return; + } + local_sctx.last_node = node; + local_sctx.last_str_idx = str_idx; + ret = re_node_set_insert (&local_sctx.limits, enabled_idx); + if (BE (ret < 0, 0)) + { + err = REG_ESPACE; + goto free_return; + } + cur_state = local_sctx.sifted_states[str_idx]; + err = sift_states_backward (mctx, &local_sctx); + if (BE (err != REG_NOERROR, 0)) + goto free_return; + if (sctx->limited_states != NULL) + { + err = merge_state_array (dfa, sctx->limited_states, + local_sctx.sifted_states, + str_idx + 1); + if (BE (err != REG_NOERROR, 0)) + goto free_return; + } + local_sctx.sifted_states[str_idx] = cur_state; + re_node_set_remove (&local_sctx.limits, enabled_idx); + + /* mctx->bkref_ents may have changed, reload the pointer. 
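+	     (Growing the cache reallocates the array, which would leave
+	     ENTRY dangling, so recompute it from ENABLED_IDX.)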
*/ + entry = mctx->bkref_ents + enabled_idx; + } + while (enabled_idx++, entry++->more); + } + err = REG_NOERROR; + free_return: + if (local_sctx.sifted_states != NULL) + { + re_node_set_free (&local_sctx.limits); + } + + return err; +} + + +#ifdef RE_ENABLE_I18N +static int +internal_function +sift_states_iter_mb (const re_match_context_t *mctx, re_sift_context_t *sctx, + int node_idx, int str_idx, int max_str_idx) +{ + const re_dfa_t *const dfa = mctx->dfa; + int naccepted; + /* Check the node can accept `multi byte'. */ + naccepted = check_node_accept_bytes (dfa, node_idx, &mctx->input, str_idx); + if (naccepted > 0 && str_idx + naccepted <= max_str_idx && + !STATE_NODE_CONTAINS (sctx->sifted_states[str_idx + naccepted], + dfa->nexts[node_idx])) + /* The node can't accept the `multi byte', or the + destination was already thrown away, then the node + couldn't accept the current input `multi byte'. */ + naccepted = 0; + /* Otherwise, it is sure that the node could accept + `naccepted' bytes input. */ + return naccepted; +} +#endif /* RE_ENABLE_I18N */ + + +/* Functions for state transition. */ + +/* Return the next state to which the current state STATE will transit by + accepting the current input byte, and update STATE_LOG if necessary. + If STATE can accept a multibyte char/collating element/back reference + update the destination of STATE_LOG. */ + +static re_dfastate_t * +internal_function +transit_state (reg_errcode_t *err, re_match_context_t *mctx, + re_dfastate_t *state) +{ + re_dfastate_t **trtable; + unsigned char ch; + +#ifdef RE_ENABLE_I18N + /* If the current state can accept multibyte. */ + if (BE (state->accept_mb, 0)) + { + *err = transit_state_mb (mctx, state); + if (BE (*err != REG_NOERROR, 0)) + return NULL; + } +#endif /* RE_ENABLE_I18N */ + + /* Then decide the next state with the single byte. */ +#if 0 + if (0) + /* don't use transition table */ + return transit_state_sb (err, mctx, state); +#endif + + /* Use transition table */ + ch = re_string_fetch_byte (&mctx->input); + for (;;) + { + trtable = state->trtable; + if (BE (trtable != NULL, 1)) + return trtable[ch]; + + trtable = state->word_trtable; + if (BE (trtable != NULL, 1)) + { + unsigned int context; + context + = re_string_context_at (&mctx->input, + re_string_cur_idx (&mctx->input) - 1, + mctx->eflags); + if (IS_WORD_CONTEXT (context)) + return trtable[ch + SBC_MAX]; + else + return trtable[ch]; + } + + if (!build_trtable (mctx->dfa, state)) + { + *err = REG_ESPACE; + return NULL; + } + + /* Retry, we now have a transition table. */ + } +} + +/* Update the state_log if we need */ +static re_dfastate_t * +internal_function +merge_state_with_log (reg_errcode_t *err, re_match_context_t *mctx, + re_dfastate_t *next_state) +{ + const re_dfa_t *const dfa = mctx->dfa; + int cur_idx = re_string_cur_idx (&mctx->input); + + if (cur_idx > mctx->state_log_top) + { + mctx->state_log[cur_idx] = next_state; + mctx->state_log_top = cur_idx; + } + else if (mctx->state_log[cur_idx] == NULL) + { + mctx->state_log[cur_idx] = next_state; + } + else + { + re_dfastate_t *pstate; + unsigned int context; + re_node_set next_nodes, *log_nodes, *table_nodes = NULL; + /* If (state_log[cur_idx] != 0), it implies that cur_idx is + the destination of a multibyte char/collating element/ + back reference. Then the next state is the union set of + these destinations and the results of the transition table. 
*/ + pstate = mctx->state_log[cur_idx]; + log_nodes = pstate->entrance_nodes; + if (next_state != NULL) + { + table_nodes = next_state->entrance_nodes; + *err = re_node_set_init_union (&next_nodes, table_nodes, + log_nodes); + if (BE (*err != REG_NOERROR, 0)) + return NULL; + } + else + next_nodes = *log_nodes; + /* Note: We already add the nodes of the initial state, + then we don't need to add them here. */ + + context = re_string_context_at (&mctx->input, + re_string_cur_idx (&mctx->input) - 1, + mctx->eflags); + next_state = mctx->state_log[cur_idx] + = re_acquire_state_context (err, dfa, &next_nodes, context); + /* We don't need to check errors here, since the return value of + this function is next_state and ERR is already set. */ + + if (table_nodes != NULL) + re_node_set_free (&next_nodes); + } + + if (BE (dfa->nbackref, 0) && next_state != NULL) + { + /* Check OP_OPEN_SUBEXP in the current state in case that we use them + later. We must check them here, since the back references in the + next state might use them. */ + *err = check_subexp_matching_top (mctx, &next_state->nodes, + cur_idx); + if (BE (*err != REG_NOERROR, 0)) + return NULL; + + /* If the next state has back references. */ + if (next_state->has_backref) + { + *err = transit_state_bkref (mctx, &next_state->nodes); + if (BE (*err != REG_NOERROR, 0)) + return NULL; + next_state = mctx->state_log[cur_idx]; + } + } + + return next_state; +} + +/* Skip bytes in the input that correspond to part of a + multi-byte match, then look in the log for a state + from which to restart matching. */ +static re_dfastate_t * +internal_function +find_recover_state (reg_errcode_t *err, re_match_context_t *mctx) +{ + re_dfastate_t *cur_state; + do + { + int max = mctx->state_log_top; + int cur_str_idx = re_string_cur_idx (&mctx->input); + + do + { + if (++cur_str_idx > max) + return NULL; + re_string_skip_bytes (&mctx->input, 1); + } + while (mctx->state_log[cur_str_idx] == NULL); + + cur_state = merge_state_with_log (err, mctx, NULL); + } + while (*err == REG_NOERROR && cur_state == NULL); + return cur_state; +} + +/* Helper functions for transit_state. */ + +/* From the node set CUR_NODES, pick up the nodes whose types are + OP_OPEN_SUBEXP and which have corresponding back references in the regular + expression. And register them to use them later for evaluating the + correspoding back references. */ + +static reg_errcode_t +internal_function +check_subexp_matching_top (re_match_context_t *mctx, re_node_set *cur_nodes, + int str_idx) +{ + const re_dfa_t *const dfa = mctx->dfa; + int node_idx; + reg_errcode_t err; + + /* TODO: This isn't efficient. + Because there might be more than one nodes whose types are + OP_OPEN_SUBEXP and whose index is SUBEXP_IDX, we must check all + nodes. + E.g. RE: (a){2} */ + for (node_idx = 0; node_idx < cur_nodes->nelem; ++node_idx) + { + int node = cur_nodes->elems[node_idx]; + if (dfa->nodes[node].type == OP_OPEN_SUBEXP + && dfa->nodes[node].opr.idx < BITSET_WORD_BITS + && (dfa->used_bkref_map + & ((bitset_word_t) 1 << dfa->nodes[node].opr.idx))) + { + err = match_ctx_add_subtop (mctx, node, str_idx); + if (BE (err != REG_NOERROR, 0)) + return err; + } + } + return REG_NOERROR; +} + +#if 0 +/* Return the next state to which the current state STATE will transit by + accepting the current input byte. 
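+   This per-node variant is kept, disabled, for reference only; the
+   table-driven loop in transit_state above replaces it.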
*/ + +static re_dfastate_t * +transit_state_sb (reg_errcode_t *err, re_match_context_t *mctx, + re_dfastate_t *state) +{ + const re_dfa_t *const dfa = mctx->dfa; + re_node_set next_nodes; + re_dfastate_t *next_state; + int node_cnt, cur_str_idx = re_string_cur_idx (&mctx->input); + unsigned int context; + + *err = re_node_set_alloc (&next_nodes, state->nodes.nelem + 1); + if (BE (*err != REG_NOERROR, 0)) + return NULL; + for (node_cnt = 0; node_cnt < state->nodes.nelem; ++node_cnt) + { + int cur_node = state->nodes.elems[node_cnt]; + if (check_node_accept (mctx, dfa->nodes + cur_node, cur_str_idx)) + { + *err = re_node_set_merge (&next_nodes, + dfa->eclosures + dfa->nexts[cur_node]); + if (BE (*err != REG_NOERROR, 0)) + { + re_node_set_free (&next_nodes); + return NULL; + } + } + } + context = re_string_context_at (&mctx->input, cur_str_idx, mctx->eflags); + next_state = re_acquire_state_context (err, dfa, &next_nodes, context); + /* We don't need to check errors here, since the return value of + this function is next_state and ERR is already set. */ + + re_node_set_free (&next_nodes); + re_string_skip_bytes (&mctx->input, 1); + return next_state; +} +#endif + +#ifdef RE_ENABLE_I18N +static reg_errcode_t +internal_function +transit_state_mb (re_match_context_t *mctx, re_dfastate_t *pstate) +{ + const re_dfa_t *const dfa = mctx->dfa; + reg_errcode_t err; + int i; + + for (i = 0; i < pstate->nodes.nelem; ++i) + { + re_node_set dest_nodes, *new_nodes; + int cur_node_idx = pstate->nodes.elems[i]; + int naccepted, dest_idx; + unsigned int context; + re_dfastate_t *dest_state; + + if (!dfa->nodes[cur_node_idx].accept_mb) + continue; + + if (dfa->nodes[cur_node_idx].constraint) + { + context = re_string_context_at (&mctx->input, + re_string_cur_idx (&mctx->input), + mctx->eflags); + if (NOT_SATISFY_NEXT_CONSTRAINT (dfa->nodes[cur_node_idx].constraint, + context)) + continue; + } + + /* How many bytes the node can accept? */ + naccepted = check_node_accept_bytes (dfa, cur_node_idx, &mctx->input, + re_string_cur_idx (&mctx->input)); + if (naccepted == 0) + continue; + + /* The node can accepts `naccepted' bytes. */ + dest_idx = re_string_cur_idx (&mctx->input) + naccepted; + mctx->max_mb_elem_len = ((mctx->max_mb_elem_len < naccepted) ? 
naccepted + : mctx->max_mb_elem_len); + err = clean_state_log_if_needed (mctx, dest_idx); + if (BE (err != REG_NOERROR, 0)) + return err; +#ifdef DEBUG + assert (dfa->nexts[cur_node_idx] != -1); +#endif + new_nodes = dfa->eclosures + dfa->nexts[cur_node_idx]; + + dest_state = mctx->state_log[dest_idx]; + if (dest_state == NULL) + dest_nodes = *new_nodes; + else + { + err = re_node_set_init_union (&dest_nodes, + dest_state->entrance_nodes, new_nodes); + if (BE (err != REG_NOERROR, 0)) + return err; + } + context = re_string_context_at (&mctx->input, dest_idx - 1, + mctx->eflags); + mctx->state_log[dest_idx] + = re_acquire_state_context (&err, dfa, &dest_nodes, context); + if (dest_state != NULL) + re_node_set_free (&dest_nodes); + if (BE (mctx->state_log[dest_idx] == NULL && err != REG_NOERROR, 0)) + return err; + } + return REG_NOERROR; +} +#endif /* RE_ENABLE_I18N */ + +static reg_errcode_t +internal_function +transit_state_bkref (re_match_context_t *mctx, const re_node_set *nodes) +{ + const re_dfa_t *const dfa = mctx->dfa; + reg_errcode_t err; + int i; + int cur_str_idx = re_string_cur_idx (&mctx->input); + + for (i = 0; i < nodes->nelem; ++i) + { + int dest_str_idx, prev_nelem, bkc_idx; + int node_idx = nodes->elems[i]; + unsigned int context; + const re_token_t *node = dfa->nodes + node_idx; + re_node_set *new_dest_nodes; + + /* Check whether `node' is a backreference or not. */ + if (node->type != OP_BACK_REF) + continue; + + if (node->constraint) + { + context = re_string_context_at (&mctx->input, cur_str_idx, + mctx->eflags); + if (NOT_SATISFY_NEXT_CONSTRAINT (node->constraint, context)) + continue; + } + + /* `node' is a backreference. + Check the substring which the substring matched. */ + bkc_idx = mctx->nbkref_ents; + err = get_subexp (mctx, node_idx, cur_str_idx); + if (BE (err != REG_NOERROR, 0)) + goto free_return; + + /* And add the epsilon closures (which is `new_dest_nodes') of + the backreference to appropriate state_log. */ +#ifdef DEBUG + assert (dfa->nexts[node_idx] != -1); +#endif + for (; bkc_idx < mctx->nbkref_ents; ++bkc_idx) + { + int subexp_len; + re_dfastate_t *dest_state; + struct re_backref_cache_entry *bkref_ent; + bkref_ent = mctx->bkref_ents + bkc_idx; + if (bkref_ent->node != node_idx || bkref_ent->str_idx != cur_str_idx) + continue; + subexp_len = bkref_ent->subexp_to - bkref_ent->subexp_from; + new_dest_nodes = (subexp_len == 0 + ? dfa->eclosures + dfa->edests[node_idx].elems[0] + : dfa->eclosures + dfa->nexts[node_idx]); + dest_str_idx = (cur_str_idx + bkref_ent->subexp_to + - bkref_ent->subexp_from); + context = re_string_context_at (&mctx->input, dest_str_idx - 1, + mctx->eflags); + dest_state = mctx->state_log[dest_str_idx]; + prev_nelem = ((mctx->state_log[cur_str_idx] == NULL) ? 0 + : mctx->state_log[cur_str_idx]->nodes.nelem); + /* Add `new_dest_node' to state_log. 
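+	     If nothing is logged at DEST_STR_IDX yet, acquire a state
+	     from the new nodes alone; otherwise merge them with the
+	     nodes already logged there, just as merge_state_with_log
+	     does.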
*/ + if (dest_state == NULL) + { + mctx->state_log[dest_str_idx] + = re_acquire_state_context (&err, dfa, new_dest_nodes, + context); + if (BE (mctx->state_log[dest_str_idx] == NULL + && err != REG_NOERROR, 0)) + goto free_return; + } + else + { + re_node_set dest_nodes; + err = re_node_set_init_union (&dest_nodes, + dest_state->entrance_nodes, + new_dest_nodes); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&dest_nodes); + goto free_return; + } + mctx->state_log[dest_str_idx] + = re_acquire_state_context (&err, dfa, &dest_nodes, context); + re_node_set_free (&dest_nodes); + if (BE (mctx->state_log[dest_str_idx] == NULL + && err != REG_NOERROR, 0)) + goto free_return; + } + /* We need to check recursively if the backreference can epsilon + transit. */ + if (subexp_len == 0 + && mctx->state_log[cur_str_idx]->nodes.nelem > prev_nelem) + { + err = check_subexp_matching_top (mctx, new_dest_nodes, + cur_str_idx); + if (BE (err != REG_NOERROR, 0)) + goto free_return; + err = transit_state_bkref (mctx, new_dest_nodes); + if (BE (err != REG_NOERROR, 0)) + goto free_return; + } + } + } + err = REG_NOERROR; + free_return: + return err; +} + +/* Enumerate all the candidates which the backreference BKREF_NODE can match + at BKREF_STR_IDX, and register them by match_ctx_add_entry(). + Note that we might collect inappropriate candidates here. + However, the cost of checking them strictly here is too high, then we + delay these checking for prune_impossible_nodes(). */ + +static reg_errcode_t +internal_function +get_subexp (re_match_context_t *mctx, int bkref_node, int bkref_str_idx) +{ + const re_dfa_t *const dfa = mctx->dfa; + int subexp_num, sub_top_idx; + const char *buf = (const char *) re_string_get_buffer (&mctx->input); + /* Return if we have already checked BKREF_NODE at BKREF_STR_IDX. */ + int cache_idx = search_cur_bkref_entry (mctx, bkref_str_idx); + if (cache_idx != -1) + { + const struct re_backref_cache_entry *entry + = mctx->bkref_ents + cache_idx; + do + if (entry->node == bkref_node) + return REG_NOERROR; /* We already checked it. */ + while (entry++->more); + } + + subexp_num = dfa->nodes[bkref_node].opr.idx; + + /* For each sub expression */ + for (sub_top_idx = 0; sub_top_idx < mctx->nsub_tops; ++sub_top_idx) + { + reg_errcode_t err; + re_sub_match_top_t *sub_top = mctx->sub_tops[sub_top_idx]; + re_sub_match_last_t *sub_last; + int sub_last_idx, sl_str, bkref_str_off; + + if (dfa->nodes[sub_top->node].opr.idx != subexp_num) + continue; /* It isn't related. */ + + sl_str = sub_top->str_idx; + bkref_str_off = bkref_str_idx; + /* At first, check the last node of sub expressions we already + evaluated. */ + for (sub_last_idx = 0; sub_last_idx < sub_top->nlasts; ++sub_last_idx) + { + int sl_str_diff; + sub_last = sub_top->lasts[sub_last_idx]; + sl_str_diff = sub_last->str_idx - sl_str; + /* The matched string by the sub expression match with the substring + at the back reference? */ + if (sl_str_diff > 0) + { + if (BE (bkref_str_off + sl_str_diff > mctx->input.valid_len, 0)) + { + /* Not enough chars for a successful match. */ + if (bkref_str_off + sl_str_diff > mctx->input.len) + break; + + err = clean_state_log_if_needed (mctx, + bkref_str_off + + sl_str_diff); + if (BE (err != REG_NOERROR, 0)) + return err; + buf = (const char *) re_string_get_buffer (&mctx->input); + } + if (memcmp (buf + bkref_str_off, buf + sl_str, sl_str_diff) != 0) + /* We don't need to search this sub expression any more. 
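+	       Later candidates from this sub_top only extend the region
+	       compared so far, so a mismatch here rules them out as
+	       well.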
*/ + break; + } + bkref_str_off += sl_str_diff; + sl_str += sl_str_diff; + err = get_subexp_sub (mctx, sub_top, sub_last, bkref_node, + bkref_str_idx); + + /* Reload buf, since the preceding call might have reallocated + the buffer. */ + buf = (const char *) re_string_get_buffer (&mctx->input); + + if (err == REG_NOMATCH) + continue; + if (BE (err != REG_NOERROR, 0)) + return err; + } + + if (sub_last_idx < sub_top->nlasts) + continue; + if (sub_last_idx > 0) + ++sl_str; + /* Then, search for the other last nodes of the sub expression. */ + for (; sl_str <= bkref_str_idx; ++sl_str) + { + int cls_node, sl_str_off; + const re_node_set *nodes; + sl_str_off = sl_str - sub_top->str_idx; + /* The matched string by the sub expression match with the substring + at the back reference? */ + if (sl_str_off > 0) + { + if (BE (bkref_str_off >= mctx->input.valid_len, 0)) + { + /* If we are at the end of the input, we cannot match. */ + if (bkref_str_off >= mctx->input.len) + break; + + err = extend_buffers (mctx); + if (BE (err != REG_NOERROR, 0)) + return err; + + buf = (const char *) re_string_get_buffer (&mctx->input); + } + if (buf [bkref_str_off++] != buf[sl_str - 1]) + break; /* We don't need to search this sub expression + any more. */ + } + if (mctx->state_log[sl_str] == NULL) + continue; + /* Does this state have a ')' of the sub expression? */ + nodes = &mctx->state_log[sl_str]->nodes; + cls_node = find_subexp_node (dfa, nodes, subexp_num, + OP_CLOSE_SUBEXP); + if (cls_node == -1) + continue; /* No. */ + if (sub_top->path == NULL) + { + sub_top->path = calloc (sizeof (state_array_t), + sl_str - sub_top->str_idx + 1); + if (sub_top->path == NULL) + return REG_ESPACE; + } + /* Can the OP_OPEN_SUBEXP node arrive the OP_CLOSE_SUBEXP node + in the current context? */ + err = check_arrival (mctx, sub_top->path, sub_top->node, + sub_top->str_idx, cls_node, sl_str, + OP_CLOSE_SUBEXP); + if (err == REG_NOMATCH) + continue; + if (BE (err != REG_NOERROR, 0)) + return err; + sub_last = match_ctx_add_sublast (sub_top, cls_node, sl_str); + if (BE (sub_last == NULL, 0)) + return REG_ESPACE; + err = get_subexp_sub (mctx, sub_top, sub_last, bkref_node, + bkref_str_idx); + if (err == REG_NOMATCH) + continue; + } + } + return REG_NOERROR; +} + +/* Helper functions for get_subexp(). */ + +/* Check SUB_LAST can arrive to the back reference BKREF_NODE at BKREF_STR. + If it can arrive, register the sub expression expressed with SUB_TOP + and SUB_LAST. */ + +static reg_errcode_t +internal_function +get_subexp_sub (re_match_context_t *mctx, const re_sub_match_top_t *sub_top, + re_sub_match_last_t *sub_last, int bkref_node, int bkref_str) +{ + reg_errcode_t err; + int to_idx; + /* Can the subexpression arrive the back reference? */ + err = check_arrival (mctx, &sub_last->path, sub_last->node, + sub_last->str_idx, bkref_node, bkref_str, + OP_OPEN_SUBEXP); + if (err != REG_NOERROR) + return err; + err = match_ctx_add_entry (mctx, bkref_node, bkref_str, sub_top->str_idx, + sub_last->str_idx); + if (BE (err != REG_NOERROR, 0)) + return err; + to_idx = bkref_str + sub_last->str_idx - sub_top->str_idx; + return clean_state_log_if_needed (mctx, to_idx); +} + +/* Find the first node which is '(' or ')' and whose index is SUBEXP_IDX. + Search '(' if FL_OPEN, or search ')' otherwise. + TODO: This function isn't efficient... + Because there might be more than one nodes whose types are + OP_OPEN_SUBEXP and whose index is SUBEXP_IDX, we must check all + nodes. + E.g. 
RE: (a){2} */ + +static int +internal_function +find_subexp_node (const re_dfa_t *dfa, const re_node_set *nodes, + int subexp_idx, int type) +{ + int cls_idx; + for (cls_idx = 0; cls_idx < nodes->nelem; ++cls_idx) + { + int cls_node = nodes->elems[cls_idx]; + const re_token_t *node = dfa->nodes + cls_node; + if (node->type == type + && node->opr.idx == subexp_idx) + return cls_node; + } + return -1; +} + +/* Check whether the node TOP_NODE at TOP_STR can arrive to the node + LAST_NODE at LAST_STR. We record the path onto PATH since it will be + heavily reused. + Return REG_NOERROR if it can arrive, or REG_NOMATCH otherwise. */ + +static reg_errcode_t +internal_function +check_arrival (re_match_context_t *mctx, state_array_t *path, int top_node, + int top_str, int last_node, int last_str, int type) +{ + const re_dfa_t *const dfa = mctx->dfa; + reg_errcode_t err = REG_NOERROR; + int subexp_num, backup_cur_idx, str_idx, null_cnt; + re_dfastate_t *cur_state = NULL; + re_node_set *cur_nodes, next_nodes; + re_dfastate_t **backup_state_log; + unsigned int context; + + subexp_num = dfa->nodes[top_node].opr.idx; + /* Extend the buffer if we need. */ + if (BE (path->alloc < last_str + mctx->max_mb_elem_len + 1, 0)) + { + re_dfastate_t **new_array; + int old_alloc = path->alloc; + path->alloc += last_str + mctx->max_mb_elem_len + 1; + new_array = re_realloc (path->array, re_dfastate_t *, path->alloc); + if (BE (new_array == NULL, 0)) + { + path->alloc = old_alloc; + return REG_ESPACE; + } + path->array = new_array; + memset (new_array + old_alloc, '\0', + sizeof (re_dfastate_t *) * (path->alloc - old_alloc)); + } + + str_idx = path->next_idx ? path->next_idx : top_str; + + /* Temporary modify MCTX. */ + backup_state_log = mctx->state_log; + backup_cur_idx = mctx->input.cur_idx; + mctx->state_log = path->array; + mctx->input.cur_idx = str_idx; + + /* Setup initial node set. 
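+     When the walk starts at TOP_STR this is TOP_NODE's epsilon
+     closure, filtered by check_arrival_expand_ecl; on re-entry through
+     PATH it is recovered from the saved state log instead.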
*/ + context = re_string_context_at (&mctx->input, str_idx - 1, mctx->eflags); + if (str_idx == top_str) + { + err = re_node_set_init_1 (&next_nodes, top_node); + if (BE (err != REG_NOERROR, 0)) + return err; + err = check_arrival_expand_ecl (dfa, &next_nodes, subexp_num, type); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&next_nodes); + return err; + } + } + else + { + cur_state = mctx->state_log[str_idx]; + if (cur_state && cur_state->has_backref) + { + err = re_node_set_init_copy (&next_nodes, &cur_state->nodes); + if (BE (err != REG_NOERROR, 0)) + return err; + } + else + re_node_set_init_empty (&next_nodes); + } + if (str_idx == top_str || (cur_state && cur_state->has_backref)) + { + if (next_nodes.nelem) + { + err = expand_bkref_cache (mctx, &next_nodes, str_idx, + subexp_num, type); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&next_nodes); + return err; + } + } + cur_state = re_acquire_state_context (&err, dfa, &next_nodes, context); + if (BE (cur_state == NULL && err != REG_NOERROR, 0)) + { + re_node_set_free (&next_nodes); + return err; + } + mctx->state_log[str_idx] = cur_state; + } + + for (null_cnt = 0; str_idx < last_str && null_cnt <= mctx->max_mb_elem_len;) + { + re_node_set_empty (&next_nodes); + if (mctx->state_log[str_idx + 1]) + { + err = re_node_set_merge (&next_nodes, + &mctx->state_log[str_idx + 1]->nodes); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&next_nodes); + return err; + } + } + if (cur_state) + { + err = check_arrival_add_next_nodes (mctx, str_idx, + &cur_state->non_eps_nodes, + &next_nodes); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&next_nodes); + return err; + } + } + ++str_idx; + if (next_nodes.nelem) + { + err = check_arrival_expand_ecl (dfa, &next_nodes, subexp_num, type); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&next_nodes); + return err; + } + err = expand_bkref_cache (mctx, &next_nodes, str_idx, + subexp_num, type); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&next_nodes); + return err; + } + } + context = re_string_context_at (&mctx->input, str_idx - 1, mctx->eflags); + cur_state = re_acquire_state_context (&err, dfa, &next_nodes, context); + if (BE (cur_state == NULL && err != REG_NOERROR, 0)) + { + re_node_set_free (&next_nodes); + return err; + } + mctx->state_log[str_idx] = cur_state; + null_cnt = cur_state == NULL ? null_cnt + 1 : 0; + } + re_node_set_free (&next_nodes); + cur_nodes = (mctx->state_log[last_str] == NULL ? NULL + : &mctx->state_log[last_str]->nodes); + path->next_idx = str_idx; + + /* Fix MCTX. */ + mctx->state_log = backup_state_log; + mctx->input.cur_idx = backup_cur_idx; + + /* Then check the current node set has the node LAST_NODE. */ + if (cur_nodes != NULL && re_node_set_contains (cur_nodes, last_node)) + return REG_NOERROR; + + return REG_NOMATCH; +} + +/* Helper functions for check_arrival. */ + +/* Calculate the destination nodes of CUR_NODES at STR_IDX, and append them + to NEXT_NODES. + TODO: This function is similar to the functions transit_state*(), + however this function has many additional works. + Can't we unify them? 
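+   (A stripped-down illustration of the core step comes first; the
+   original helper follows it.)  */
+
+/* A minimal sketch of the "advance every accepting node" step performed
+   by the helper below.  Everything named example_* is hypothetical and
+   for illustration only: node I accepts byte CH iff ACCEPTS[I] == CH,
+   and its sole successor is NEXTS[I].  NEXT must have room for NCUR
+   more elements. */
+static void
+example_advance_nodes (const int *cur, int ncur,
+                       const unsigned char *accepts, const int *nexts,
+                       unsigned char ch, int *next, int *nnext)
+{
+  int i;
+  for (i = 0; i < ncur; ++i)
+    if (accepts[cur[i]] == ch)
+      /* The node accepts CH, so its successor survives into the
+         next set. */
+      next[(*nnext)++] = nexts[cur[i]];
+}
+
+/* The original helper: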
*/ + +static reg_errcode_t +internal_function +check_arrival_add_next_nodes (re_match_context_t *mctx, int str_idx, + re_node_set *cur_nodes, re_node_set *next_nodes) +{ + const re_dfa_t *const dfa = mctx->dfa; + int result; + int cur_idx; +#ifdef RE_ENABLE_I18N + reg_errcode_t err = REG_NOERROR; +#endif + re_node_set union_set; + re_node_set_init_empty (&union_set); + for (cur_idx = 0; cur_idx < cur_nodes->nelem; ++cur_idx) + { + int naccepted = 0; + int cur_node = cur_nodes->elems[cur_idx]; +#ifdef DEBUG + re_token_type_t type = dfa->nodes[cur_node].type; + assert (!IS_EPSILON_NODE (type)); +#endif +#ifdef RE_ENABLE_I18N + /* If the node may accept `multi byte'. */ + if (dfa->nodes[cur_node].accept_mb) + { + naccepted = check_node_accept_bytes (dfa, cur_node, &mctx->input, + str_idx); + if (naccepted > 1) + { + re_dfastate_t *dest_state; + int next_node = dfa->nexts[cur_node]; + int next_idx = str_idx + naccepted; + dest_state = mctx->state_log[next_idx]; + re_node_set_empty (&union_set); + if (dest_state) + { + err = re_node_set_merge (&union_set, &dest_state->nodes); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&union_set); + return err; + } + } + result = re_node_set_insert (&union_set, next_node); + if (BE (result < 0, 0)) + { + re_node_set_free (&union_set); + return REG_ESPACE; + } + mctx->state_log[next_idx] = re_acquire_state (&err, dfa, + &union_set); + if (BE (mctx->state_log[next_idx] == NULL + && err != REG_NOERROR, 0)) + { + re_node_set_free (&union_set); + return err; + } + } + } +#endif /* RE_ENABLE_I18N */ + if (naccepted + || check_node_accept (mctx, dfa->nodes + cur_node, str_idx)) + { + result = re_node_set_insert (next_nodes, dfa->nexts[cur_node]); + if (BE (result < 0, 0)) + { + re_node_set_free (&union_set); + return REG_ESPACE; + } + } + } + re_node_set_free (&union_set); + return REG_NOERROR; +} + +/* For all the nodes in CUR_NODES, add the epsilon closures of them to + CUR_NODES, however exclude the nodes which are: + - inside the sub expression whose number is EX_SUBEXP, if FL_OPEN. + - out of the sub expression whose number is EX_SUBEXP, if !FL_OPEN. +*/ + +static reg_errcode_t +internal_function +check_arrival_expand_ecl (const re_dfa_t *dfa, re_node_set *cur_nodes, + int ex_subexp, int type) +{ + reg_errcode_t err; + int idx, outside_node; + re_node_set new_nodes; +#ifdef DEBUG + assert (cur_nodes->nelem); +#endif + err = re_node_set_alloc (&new_nodes, cur_nodes->nelem); + if (BE (err != REG_NOERROR, 0)) + return err; + /* Create a new node set NEW_NODES with the nodes which are epsilon + closures of the node in CUR_NODES. */ + + for (idx = 0; idx < cur_nodes->nelem; ++idx) + { + int cur_node = cur_nodes->elems[idx]; + const re_node_set *eclosure = dfa->eclosures + cur_node; + outside_node = find_subexp_node (dfa, eclosure, ex_subexp, type); + if (outside_node == -1) + { + /* There are no problematic nodes, just merge them. */ + err = re_node_set_merge (&new_nodes, eclosure); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&new_nodes); + return err; + } + } + else + { + /* There are problematic nodes, re-calculate incrementally. */ + err = check_arrival_expand_ecl_sub (dfa, &new_nodes, cur_node, + ex_subexp, type); + if (BE (err != REG_NOERROR, 0)) + { + re_node_set_free (&new_nodes); + return err; + } + } + } + re_node_set_free (cur_nodes); + *cur_nodes = new_nodes; + return REG_NOERROR; +} + +/* Helper function for check_arrival_expand_ecl. 
+ Check incrementally the epsilon closure of TARGET, and if it isn't + problematic append it to DST_NODES. */ + +static reg_errcode_t +internal_function +check_arrival_expand_ecl_sub (const re_dfa_t *dfa, re_node_set *dst_nodes, + int target, int ex_subexp, int type) +{ + int cur_node; + for (cur_node = target; !re_node_set_contains (dst_nodes, cur_node);) + { + int err; + + if (dfa->nodes[cur_node].type == type + && dfa->nodes[cur_node].opr.idx == ex_subexp) + { + if (type == OP_CLOSE_SUBEXP) + { + err = re_node_set_insert (dst_nodes, cur_node); + if (BE (err == -1, 0)) + return REG_ESPACE; + } + break; + } + err = re_node_set_insert (dst_nodes, cur_node); + if (BE (err == -1, 0)) + return REG_ESPACE; + if (dfa->edests[cur_node].nelem == 0) + break; + if (dfa->edests[cur_node].nelem == 2) + { + err = check_arrival_expand_ecl_sub (dfa, dst_nodes, + dfa->edests[cur_node].elems[1], + ex_subexp, type); + if (BE (err != REG_NOERROR, 0)) + return err; + } + cur_node = dfa->edests[cur_node].elems[0]; + } + return REG_NOERROR; +} + + +/* For all the back references in the current state, calculate the + destination of the back references by the appropriate entry + in MCTX->BKREF_ENTS. */ + +static reg_errcode_t +internal_function +expand_bkref_cache (re_match_context_t *mctx, re_node_set *cur_nodes, + int cur_str, int subexp_num, int type) +{ + const re_dfa_t *const dfa = mctx->dfa; + reg_errcode_t err; + int cache_idx_start = search_cur_bkref_entry (mctx, cur_str); + struct re_backref_cache_entry *ent; + + if (cache_idx_start == -1) + return REG_NOERROR; + + restart: + ent = mctx->bkref_ents + cache_idx_start; + do + { + int to_idx, next_node; + + /* Is this entry ENT is appropriate? */ + if (!re_node_set_contains (cur_nodes, ent->node)) + continue; /* No. */ + + to_idx = cur_str + ent->subexp_to - ent->subexp_from; + /* Calculate the destination of the back reference, and append it + to MCTX->STATE_LOG. */ + if (to_idx == cur_str) + { + /* The backreference did epsilon transit, we must re-check all the + node in the current state. */ + re_node_set new_dests; + reg_errcode_t err2, err3; + next_node = dfa->edests[ent->node].elems[0]; + if (re_node_set_contains (cur_nodes, next_node)) + continue; + err = re_node_set_init_1 (&new_dests, next_node); + err2 = check_arrival_expand_ecl (dfa, &new_dests, subexp_num, type); + err3 = re_node_set_merge (cur_nodes, &new_dests); + re_node_set_free (&new_dests); + if (BE (err != REG_NOERROR || err2 != REG_NOERROR + || err3 != REG_NOERROR, 0)) + { + err = (err != REG_NOERROR ? err + : (err2 != REG_NOERROR ? err2 : err3)); + return err; + } + /* TODO: It is still inefficient... */ + goto restart; + } + else + { + re_node_set union_set; + next_node = dfa->nexts[ent->node]; + if (mctx->state_log[to_idx]) + { + int ret; + if (re_node_set_contains (&mctx->state_log[to_idx]->nodes, + next_node)) + continue; + err = re_node_set_init_copy (&union_set, + &mctx->state_log[to_idx]->nodes); + ret = re_node_set_insert (&union_set, next_node); + if (BE (err != REG_NOERROR || ret < 0, 0)) + { + re_node_set_free (&union_set); + err = err != REG_NOERROR ? 
err : REG_ESPACE; + return err; + } + } + else + { + err = re_node_set_init_1 (&union_set, next_node); + if (BE (err != REG_NOERROR, 0)) + return err; + } + mctx->state_log[to_idx] = re_acquire_state (&err, dfa, &union_set); + re_node_set_free (&union_set); + if (BE (mctx->state_log[to_idx] == NULL + && err != REG_NOERROR, 0)) + return err; + } + } + while (ent++->more); + return REG_NOERROR; +} + +/* Build transition table for the state. + Return 1 if succeeded, otherwise return NULL. */ + +static int +internal_function +build_trtable (const re_dfa_t *dfa, re_dfastate_t *state) +{ + reg_errcode_t err; + int i, j, ch, need_word_trtable = 0; + bitset_word_t elem, mask; + bool dests_node_malloced = false; + bool dest_states_malloced = false; + int ndests; /* Number of the destination states from `state'. */ + re_dfastate_t **trtable; + re_dfastate_t **dest_states = NULL, **dest_states_word, **dest_states_nl; + re_node_set follows, *dests_node; + bitset_t *dests_ch; + bitset_t acceptable; + + struct dests_alloc + { + re_node_set dests_node[SBC_MAX]; + bitset_t dests_ch[SBC_MAX]; + } *dests_alloc; + + /* We build DFA states which corresponds to the destination nodes + from `state'. `dests_node[i]' represents the nodes which i-th + destination state contains, and `dests_ch[i]' represents the + characters which i-th destination state accepts. */ +#ifdef HAVE_ALLOCA + if (__libc_use_alloca (sizeof (struct dests_alloc))) + dests_alloc = (struct dests_alloc *) alloca (sizeof (struct dests_alloc)); + else +#endif + { + dests_alloc = re_malloc (struct dests_alloc, 1); + if (BE (dests_alloc == NULL, 0)) + return 0; + dests_node_malloced = true; + } + dests_node = dests_alloc->dests_node; + dests_ch = dests_alloc->dests_ch; + + /* Initialize transiton table. */ + state->word_trtable = state->trtable = NULL; + + /* At first, group all nodes belonging to `state' into several + destinations. */ + ndests = group_nodes_into_DFAstates (dfa, state, dests_node, dests_ch); + if (BE (ndests <= 0, 0)) + { + if (dests_node_malloced) + free (dests_alloc); + /* Return 0 in case of an error, 1 otherwise. */ + if (ndests == 0) + { + state->trtable = (re_dfastate_t **) + calloc (sizeof (re_dfastate_t *), SBC_MAX); + return 1; + } + return 0; + } + + err = re_node_set_alloc (&follows, ndests + 1); + if (BE (err != REG_NOERROR, 0)) + goto out_free; + + /* Avoid arithmetic overflow in size calculation. */ + if (BE ((((SIZE_MAX - (sizeof (re_node_set) + sizeof (bitset_t)) * SBC_MAX) + / (3 * sizeof (re_dfastate_t *))) + < ndests), + 0)) + goto out_free; + +#ifdef HAVE_ALLOCA + if (__libc_use_alloca ((sizeof (re_node_set) + sizeof (bitset_t)) * SBC_MAX + + ndests * 3 * sizeof (re_dfastate_t *))) + dest_states = (re_dfastate_t **) + alloca (ndests * 3 * sizeof (re_dfastate_t *)); + else +#endif + { + dest_states = (re_dfastate_t **) + malloc (ndests * 3 * sizeof (re_dfastate_t *)); + if (BE (dest_states == NULL, 0)) + { +out_free: + if (dest_states_malloced) + free (dest_states); + re_node_set_free (&follows); + for (i = 0; i < ndests; ++i) + re_node_set_free (dests_node + i); + if (dests_node_malloced) + free (dests_alloc); + return 0; + } + dest_states_malloced = true; + } + dest_states_word = dest_states + ndests; + dest_states_nl = dest_states_word + ndests; + bitset_empty (acceptable); + + /* Then build the states for all destinations. */ + for (i = 0; i < ndests; ++i) + { + int next_node; + re_node_set_empty (&follows); + /* Merge the follows of this destination states. 
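+	 That is, take the union of the epsilon closures of each node's
+	 successor; the resulting set becomes the canonical state for
+	 this destination.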
*/ + for (j = 0; j < dests_node[i].nelem; ++j) + { + next_node = dfa->nexts[dests_node[i].elems[j]]; + if (next_node != -1) + { + err = re_node_set_merge (&follows, dfa->eclosures + next_node); + if (BE (err != REG_NOERROR, 0)) + goto out_free; + } + } + dest_states[i] = re_acquire_state_context (&err, dfa, &follows, 0); + if (BE (dest_states[i] == NULL && err != REG_NOERROR, 0)) + goto out_free; + /* If the new state has context constraint, + build appropriate states for these contexts. */ + if (dest_states[i]->has_constraint) + { + dest_states_word[i] = re_acquire_state_context (&err, dfa, &follows, + CONTEXT_WORD); + if (BE (dest_states_word[i] == NULL && err != REG_NOERROR, 0)) + goto out_free; + + if (dest_states[i] != dest_states_word[i] && dfa->mb_cur_max > 1) + need_word_trtable = 1; + + dest_states_nl[i] = re_acquire_state_context (&err, dfa, &follows, + CONTEXT_NEWLINE); + if (BE (dest_states_nl[i] == NULL && err != REG_NOERROR, 0)) + goto out_free; + } + else + { + dest_states_word[i] = dest_states[i]; + dest_states_nl[i] = dest_states[i]; + } + bitset_merge (acceptable, dests_ch[i]); + } + + if (!BE (need_word_trtable, 0)) + { + /* We don't care about whether the following character is a word + character, or we are in a single-byte character set so we can + discern by looking at the character code: allocate a + 256-entry transition table. */ + trtable = state->trtable = + (re_dfastate_t **) calloc (sizeof (re_dfastate_t *), SBC_MAX); + if (BE (trtable == NULL, 0)) + goto out_free; + + /* For all characters ch...: */ + for (i = 0; i < BITSET_WORDS; ++i) + for (ch = i * BITSET_WORD_BITS, elem = acceptable[i], mask = 1; + elem; + mask <<= 1, elem >>= 1, ++ch) + if (BE (elem & 1, 0)) + { + /* There must be exactly one destination which accepts + character ch. See group_nodes_into_DFAstates. */ + for (j = 0; (dests_ch[j][i] & mask) == 0; ++j) + ; + + /* j-th destination accepts the word character ch. */ + if (dfa->word_char[i] & mask) + trtable[ch] = dest_states_word[j]; + else + trtable[ch] = dest_states[j]; + } + } + else + { + /* We care about whether the following character is a word + character, and we are in a multi-byte character set: discern + by looking at the character code: build two 256-entry + transition tables, one starting at trtable[0] and one + starting at trtable[SBC_MAX]. */ + trtable = state->word_trtable = + (re_dfastate_t **) calloc (sizeof (re_dfastate_t *), 2 * SBC_MAX); + if (BE (trtable == NULL, 0)) + goto out_free; + + /* For all characters ch...: */ + for (i = 0; i < BITSET_WORDS; ++i) + for (ch = i * BITSET_WORD_BITS, elem = acceptable[i], mask = 1; + elem; + mask <<= 1, elem >>= 1, ++ch) + if (BE (elem & 1, 0)) + { + /* There must be exactly one destination which accepts + character ch. See group_nodes_into_DFAstates. */ + for (j = 0; (dests_ch[j][i] & mask) == 0; ++j) + ; + + /* j-th destination accepts the word character ch. */ + trtable[ch] = dest_states[j]; + trtable[ch + SBC_MAX] = dest_states_word[j]; + } + } + + /* new line */ + if (bitset_contain (acceptable, NEWLINE_CHAR)) + { + /* The current state accepts newline character. */ + for (j = 0; j < ndests; ++j) + if (bitset_contain (dests_ch[j], NEWLINE_CHAR)) + { + /* k-th destination accepts newline character. */ + trtable[NEWLINE_CHAR] = dest_states_nl[j]; + if (need_word_trtable) + trtable[NEWLINE_CHAR + SBC_MAX] = dest_states_nl[j]; + /* There must be only one destination which accepts + newline. See group_nodes_into_DFAstates. 
*/ + break; + } + } + + if (dest_states_malloced) + free (dest_states); + + re_node_set_free (&follows); + for (i = 0; i < ndests; ++i) + re_node_set_free (dests_node + i); + + if (dests_node_malloced) + free (dests_alloc); + + return 1; +} + +/* Group all nodes belonging to STATE into several destinations. + Then for all destinations, set the nodes belonging to the destination + to DESTS_NODE[i] and set the characters accepted by the destination + to DEST_CH[i]. This function return the number of destinations. */ + +static int +internal_function +group_nodes_into_DFAstates (const re_dfa_t *dfa, const re_dfastate_t *state, + re_node_set *dests_node, bitset_t *dests_ch) +{ + reg_errcode_t err; + int result; + int i, j, k; + int ndests; /* Number of the destinations from `state'. */ + bitset_t accepts; /* Characters a node can accept. */ + const re_node_set *cur_nodes = &state->nodes; + bitset_empty (accepts); + ndests = 0; + + /* For all the nodes belonging to `state', */ + for (i = 0; i < cur_nodes->nelem; ++i) + { + re_token_t *node = &dfa->nodes[cur_nodes->elems[i]]; + re_token_type_t type = node->type; + unsigned int constraint = node->constraint; + + /* Enumerate all single byte character this node can accept. */ + if (type == CHARACTER) + bitset_set (accepts, node->opr.c); + else if (type == SIMPLE_BRACKET) + { + bitset_merge (accepts, node->opr.sbcset); + } + else if (type == OP_PERIOD) + { +#ifdef RE_ENABLE_I18N + if (dfa->mb_cur_max > 1) + bitset_merge (accepts, dfa->sb_char); + else +#endif + bitset_set_all (accepts); + if (!(dfa->syntax & RE_DOT_NEWLINE)) + bitset_clear (accepts, '\n'); + if (dfa->syntax & RE_DOT_NOT_NULL) + bitset_clear (accepts, '\0'); + } +#ifdef RE_ENABLE_I18N + else if (type == OP_UTF8_PERIOD) + { + memset (accepts, '\xff', sizeof (bitset_t) / 2); + if (!(dfa->syntax & RE_DOT_NEWLINE)) + bitset_clear (accepts, '\n'); + if (dfa->syntax & RE_DOT_NOT_NULL) + bitset_clear (accepts, '\0'); + } +#endif + else + continue; + + /* Check the `accepts' and sift the characters which are not + match it the context. */ + if (constraint) + { + if (constraint & NEXT_NEWLINE_CONSTRAINT) + { + bool accepts_newline = bitset_contain (accepts, NEWLINE_CHAR); + bitset_empty (accepts); + if (accepts_newline) + bitset_set (accepts, NEWLINE_CHAR); + else + continue; + } + if (constraint & NEXT_ENDBUF_CONSTRAINT) + { + bitset_empty (accepts); + continue; + } + + if (constraint & NEXT_WORD_CONSTRAINT) + { + bitset_word_t any_set = 0; + if (type == CHARACTER && !node->word_char) + { + bitset_empty (accepts); + continue; + } +#ifdef RE_ENABLE_I18N + if (dfa->mb_cur_max > 1) + for (j = 0; j < BITSET_WORDS; ++j) + any_set |= (accepts[j] &= (dfa->word_char[j] | ~dfa->sb_char[j])); + else +#endif + for (j = 0; j < BITSET_WORDS; ++j) + any_set |= (accepts[j] &= dfa->word_char[j]); + if (!any_set) + continue; + } + if (constraint & NEXT_NOTWORD_CONSTRAINT) + { + bitset_word_t any_set = 0; + if (type == CHARACTER && node->word_char) + { + bitset_empty (accepts); + continue; + } +#ifdef RE_ENABLE_I18N + if (dfa->mb_cur_max > 1) + for (j = 0; j < BITSET_WORDS; ++j) + any_set |= (accepts[j] &= ~(dfa->word_char[j] & dfa->sb_char[j])); + else +#endif + for (j = 0; j < BITSET_WORDS; ++j) + any_set |= (accepts[j] &= ~dfa->word_char[j]); + if (!any_set) + continue; + } + } + + /* Then divide `accepts' into DFA states, or create a new + state. Above, we make sure that accepts is not empty. */ + for (j = 0; j < ndests; ++j) + { + bitset_t intersec; /* Intersection sets, see below. 
*/ + bitset_t remains; + /* Flags, see below. */ + bitset_word_t has_intersec, not_subset, not_consumed; + + /* Optimization, skip if this state doesn't accept the character. */ + if (type == CHARACTER && !bitset_contain (dests_ch[j], node->opr.c)) + continue; + + /* Enumerate the intersection set of this state and `accepts'. */ + has_intersec = 0; + for (k = 0; k < BITSET_WORDS; ++k) + has_intersec |= intersec[k] = accepts[k] & dests_ch[j][k]; + /* And skip if the intersection set is empty. */ + if (!has_intersec) + continue; + + /* Then check if this state is a subset of `accepts'. */ + not_subset = not_consumed = 0; + for (k = 0; k < BITSET_WORDS; ++k) + { + not_subset |= remains[k] = ~accepts[k] & dests_ch[j][k]; + not_consumed |= accepts[k] = accepts[k] & ~dests_ch[j][k]; + } + + /* If this state isn't a subset of `accepts', create a + new group state, which has the `remains'. */ + if (not_subset) + { + bitset_copy (dests_ch[ndests], remains); + bitset_copy (dests_ch[j], intersec); + err = re_node_set_init_copy (dests_node + ndests, &dests_node[j]); + if (BE (err != REG_NOERROR, 0)) + goto error_return; + ++ndests; + } + + /* Put the position in the current group. */ + result = re_node_set_insert (&dests_node[j], cur_nodes->elems[i]); + if (BE (result < 0, 0)) + goto error_return; + + /* If all characters are consumed, go to next node. */ + if (!not_consumed) + break; + } + /* Some characters remain, create a new group. */ + if (j == ndests) + { + bitset_copy (dests_ch[ndests], accepts); + err = re_node_set_init_1 (dests_node + ndests, cur_nodes->elems[i]); + if (BE (err != REG_NOERROR, 0)) + goto error_return; + ++ndests; + bitset_empty (accepts); + } + } + return ndests; + error_return: + for (j = 0; j < ndests; ++j) + re_node_set_free (dests_node + j); + return -1; +} + +#ifdef RE_ENABLE_I18N +/* Check how many bytes the node `dfa->nodes[node_idx]' accepts. + Return the number of the bytes the node accepts. + STR_IDX is the current index of the input string. + + This function handles the nodes which can accept one character, or + one collating element like '.', '[a-z]', opposite to the other nodes + can only accept one byte. */ + +static int +internal_function +check_node_accept_bytes (const re_dfa_t *dfa, int node_idx, + const re_string_t *input, int str_idx) +{ + const re_token_t *node = dfa->nodes + node_idx; + int char_len, elem_len; + int i; + wint_t wc; + + if (BE (node->type == OP_UTF8_PERIOD, 0)) + { + unsigned char c = re_string_byte_at (input, str_idx), d; + if (BE (c < 0xc2, 1)) + return 0; + + if (str_idx + 2 > input->len) + return 0; + + d = re_string_byte_at (input, str_idx + 1); + if (c < 0xe0) + return (d < 0x80 || d > 0xbf) ? 0 : 2; + else if (c < 0xf0) + { + char_len = 3; + if (c == 0xe0 && d < 0xa0) + return 0; + } + else if (c < 0xf8) + { + char_len = 4; + if (c == 0xf0 && d < 0x90) + return 0; + } + else if (c < 0xfc) + { + char_len = 5; + if (c == 0xf8 && d < 0x88) + return 0; + } + else if (c < 0xfe) + { + char_len = 6; + if (c == 0xfc && d < 0x84) + return 0; + } + else + return 0; + + if (str_idx + char_len > input->len) + return 0; + + for (i = 1; i < char_len; ++i) + { + d = re_string_byte_at (input, str_idx + i); + if (d < 0x80 || d > 0xbf) + return 0; + } + return char_len; + } + + char_len = re_string_char_size_at (input, str_idx); + if (node->type == OP_PERIOD) + { + if (char_len <= 1) + return 0; + /* FIXME: I don't think this if is needed, as both '\n' + and '\0' are char_len == 1. */ + /* '.' 
accepts any one character except the following two cases. */ + if ((!(dfa->syntax & RE_DOT_NEWLINE) && + re_string_byte_at (input, str_idx) == '\n') || + ((dfa->syntax & RE_DOT_NOT_NULL) && + re_string_byte_at (input, str_idx) == '\0')) + return 0; + return char_len; + } + + elem_len = re_string_elem_size_at (input, str_idx); + wc = __btowc(*(input->mbs+str_idx)); + if (((elem_len <= 1 && char_len <= 1) || char_len == 0) && (wc != WEOF && wc < SBC_MAX)) + return 0; + + if (node->type == COMPLEX_BRACKET) + { + const re_charset_t *cset = node->opr.mbcset; +# ifdef _LIBC + const unsigned char *pin + = ((const unsigned char *) re_string_get_buffer (input) + str_idx); + int j; + uint32_t nrules; +# endif /* _LIBC */ + int match_len = 0; + wchar_t wc = ((cset->nranges || cset->nchar_classes || cset->nmbchars) + ? re_string_wchar_at (input, str_idx) : 0); + + /* match with multibyte character? */ + for (i = 0; i < cset->nmbchars; ++i) + if (wc == cset->mbchars[i]) + { + match_len = char_len; + goto check_node_accept_bytes_match; + } + /* match with character_class? */ + for (i = 0; i < cset->nchar_classes; ++i) + { + wctype_t wt = cset->char_classes[i]; + if (__iswctype (wc, wt)) + { + match_len = char_len; + goto check_node_accept_bytes_match; + } + } + +# ifdef _LIBC + nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); + if (nrules != 0) + { + unsigned int in_collseq = 0; + const int32_t *table, *indirect; + const unsigned char *weights, *extra; + const char *collseqwc; + /* This #include defines a local function! */ +# include <locale/weight.h> + + /* match with collating_symbol? */ + if (cset->ncoll_syms) + extra = (const unsigned char *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); + for (i = 0; i < cset->ncoll_syms; ++i) + { + const unsigned char *coll_sym = extra + cset->coll_syms[i]; + /* Compare the length of input collating element and + the length of current collating element. */ + if (*coll_sym != elem_len) + continue; + /* Compare each bytes. */ + for (j = 0; j < *coll_sym; j++) + if (pin[j] != coll_sym[1 + j]) + break; + if (j == *coll_sym) + { + /* Match if every bytes is equal. */ + match_len = j; + goto check_node_accept_bytes_match; + } + } + + if (cset->nranges) + { + if (elem_len <= char_len) + { + collseqwc = _NL_CURRENT (LC_COLLATE, _NL_COLLATE_COLLSEQWC); + in_collseq = __collseq_table_lookup (collseqwc, wc); + } + else + in_collseq = find_collation_sequence_value (pin, elem_len); + } + /* match with range expression? */ + for (i = 0; i < cset->nranges; ++i) + if (cset->range_starts[i] <= in_collseq + && in_collseq <= cset->range_ends[i]) + { + match_len = elem_len; + goto check_node_accept_bytes_match; + } + + /* match with equivalence_class? 
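+	 Two collating elements are equivalent when their weight entries
+	 share the same rule (the high byte of the index) and the same
+	 weight string, which the byte-by-byte loop below verifies.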
*/ + if (cset->nequiv_classes) + { + const unsigned char *cp = pin; + table = (const int32_t *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEMB); + weights = (const unsigned char *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTMB); + extra = (const unsigned char *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAMB); + indirect = (const int32_t *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTMB); + int32_t idx = findidx (&cp); + if (idx > 0) + for (i = 0; i < cset->nequiv_classes; ++i) + { + int32_t equiv_class_idx = cset->equiv_classes[i]; + size_t weight_len = weights[idx & 0xffffff]; + if (weight_len == weights[equiv_class_idx & 0xffffff] + && (idx >> 24) == (equiv_class_idx >> 24)) + { + int cnt = 0; + + idx &= 0xffffff; + equiv_class_idx &= 0xffffff; + + while (cnt <= weight_len + && (weights[equiv_class_idx + 1 + cnt] + == weights[idx + 1 + cnt])) + ++cnt; + if (cnt > weight_len) + { + match_len = elem_len; + goto check_node_accept_bytes_match; + } + } + } + } + } + else +# endif /* _LIBC */ + { + /* match with range expression? */ +#if __GNUC__ >= 2 + wchar_t cmp_buf[] = {L'\0', L'\0', wc, L'\0', L'\0', L'\0'}; +#else + wchar_t cmp_buf[] = {L'\0', L'\0', L'\0', L'\0', L'\0', L'\0'}; + cmp_buf[2] = wc; +#endif + for (i = 0; i < cset->nranges; ++i) + { + cmp_buf[0] = cset->range_starts[i]; + cmp_buf[4] = cset->range_ends[i]; + if (wcscoll (cmp_buf, cmp_buf + 2) <= 0 + && wcscoll (cmp_buf + 2, cmp_buf + 4) <= 0) + { + match_len = char_len; + goto check_node_accept_bytes_match; + } + } + } + check_node_accept_bytes_match: + if (!cset->non_match) + return match_len; + else + { + if (match_len > 0) + return 0; + else + return (elem_len > char_len) ? elem_len : char_len; + } + } + return 0; +} + +# ifdef _LIBC +static unsigned int +internal_function +find_collation_sequence_value (const unsigned char *mbs, size_t mbs_len) +{ + uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); + if (nrules == 0) + { + if (mbs_len == 1) + { + /* No valid character. Match it as a single byte character. */ + const unsigned char *collseq = (const unsigned char *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_COLLSEQMB); + return collseq[mbs[0]]; + } + return UINT_MAX; + } + else + { + int32_t idx; + const unsigned char *extra = (const unsigned char *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); + int32_t extrasize = (const unsigned char *) + _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB + 1) - extra; + + for (idx = 0; idx < extrasize;) + { + int mbs_cnt, found = 0; + int32_t elem_mbs_len; + /* Skip the name of collating element name. */ + idx = idx + extra[idx] + 1; + elem_mbs_len = extra[idx++]; + if (mbs_len == elem_mbs_len) + { + for (mbs_cnt = 0; mbs_cnt < elem_mbs_len; ++mbs_cnt) + if (extra[idx + mbs_cnt] != mbs[mbs_cnt]) + break; + if (mbs_cnt == elem_mbs_len) + /* Found the entry. */ + found = 1; + } + /* Skip the byte sequence of the collating element. */ + idx += elem_mbs_len; + /* Adjust for the alignment. */ + idx = (idx + 3) & ~3; + /* Skip the collation sequence value. */ + idx += sizeof (uint32_t); + /* Skip the wide char sequence of the collating element. */ + idx = idx + sizeof (uint32_t) * (extra[idx] + 1); + /* If we found the entry, return the sequence value. */ + if (found) + return *(uint32_t *) (extra + idx); + /* Skip the collation sequence value. */ + idx += sizeof (uint32_t); + } + return UINT_MAX; + } +} +# endif /* _LIBC */ +#endif /* RE_ENABLE_I18N */ + +/* Check whether the node accepts the byte which is IDX-th + byte of the INPUT. 
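+   (First, a stand-alone restatement of the UTF-8 length test used by
+   the OP_UTF8_PERIOD case in check_node_accept_bytes above.)  */
+
+/* Minimal sketch, for illustration only; example_utf8_seq_len is a
+   hypothetical name.  It returns the expected total length of a UTF-8
+   sequence whose lead byte is C, or 0 when C cannot start one.  These
+   are the same thresholds the OP_UTF8_PERIOD case tests byte by
+   byte. */
+static int
+example_utf8_seq_len (unsigned char c)
+{
+  if (c < 0xc2)
+    return 0;   /* ASCII, a continuation byte, or an overlong lead.  */
+  if (c < 0xe0)
+    return 2;   /* 110xxxxx */
+  if (c < 0xf0)
+    return 3;   /* 1110xxxx */
+  if (c < 0xf8)
+    return 4;   /* 11110xxx */
+  if (c < 0xfc)
+    return 5;   /* 111110xx, legacy 5-byte form.  */
+  if (c < 0xfe)
+    return 6;   /* 1111110x, legacy 6-byte form.  */
+  return 0;     /* 0xfe and 0xff never start a sequence.  */
+}
+
+/* The original byte-level check: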
*/ + +static int +internal_function +check_node_accept (const re_match_context_t *mctx, const re_token_t *node, + int idx) +{ + unsigned char ch; + ch = re_string_byte_at (&mctx->input, idx); + switch (node->type) + { + case CHARACTER: + if (node->opr.c != ch) + return 0; + break; + + case SIMPLE_BRACKET: + if (!bitset_contain (node->opr.sbcset, ch)) + return 0; + break; + +#ifdef RE_ENABLE_I18N + case OP_UTF8_PERIOD: + if (ch >= 0x80) + return 0; + /* FALLTHROUGH */ +#endif + case OP_PERIOD: + if ((ch == '\n' && !(mctx->dfa->syntax & RE_DOT_NEWLINE)) + || (ch == '\0' && (mctx->dfa->syntax & RE_DOT_NOT_NULL))) + return 0; + break; + + default: + return 0; + } + + if (node->constraint) + { + /* The node has constraints. Check whether the current context + satisfies the constraints. */ + unsigned int context = re_string_context_at (&mctx->input, idx, + mctx->eflags); + if (NOT_SATISFY_NEXT_CONSTRAINT (node->constraint, context)) + return 0; + } + + return 1; +} + +/* Extend the buffers, if the buffers have run out. */ + +static reg_errcode_t +internal_function +extend_buffers (re_match_context_t *mctx) +{ + reg_errcode_t ret; + re_string_t *pstr = &mctx->input; + + /* Avoid overflow. */ + if (BE (INT_MAX / 2 / sizeof (re_dfastate_t *) <= pstr->bufs_len, 0)) + return REG_ESPACE; + + /* Double the lengthes of the buffers. */ + ret = re_string_realloc_buffers (pstr, pstr->bufs_len * 2); + if (BE (ret != REG_NOERROR, 0)) + return ret; + + if (mctx->state_log != NULL) + { + /* And double the length of state_log. */ + /* XXX We have no indication of the size of this buffer. If this + allocation fail we have no indication that the state_log array + does not have the right size. */ + re_dfastate_t **new_array = re_realloc (mctx->state_log, re_dfastate_t *, + pstr->bufs_len + 1); + if (BE (new_array == NULL, 0)) + return REG_ESPACE; + mctx->state_log = new_array; + } + + /* Then reconstruct the buffers. */ + if (pstr->icase) + { +#ifdef RE_ENABLE_I18N + if (pstr->mb_cur_max > 1) + { + ret = build_wcs_upper_buffer (pstr); + if (BE (ret != REG_NOERROR, 0)) + return ret; + } + else +#endif /* RE_ENABLE_I18N */ + build_upper_buffer (pstr); + } + else + { +#ifdef RE_ENABLE_I18N + if (pstr->mb_cur_max > 1) + build_wcs_buffer (pstr); + else +#endif /* RE_ENABLE_I18N */ + { + if (pstr->trans != NULL) + re_string_translate_buffer (pstr); + } + } + return REG_NOERROR; +} + + +/* Functions for matching context. */ + +/* Initialize MCTX. */ + +static reg_errcode_t +internal_function +match_ctx_init (re_match_context_t *mctx, int eflags, int n) +{ + mctx->eflags = eflags; + mctx->match_last = -1; + if (n > 0) + { + mctx->bkref_ents = re_malloc (struct re_backref_cache_entry, n); + mctx->sub_tops = re_malloc (re_sub_match_top_t *, n); + if (BE (mctx->bkref_ents == NULL || mctx->sub_tops == NULL, 0)) + return REG_ESPACE; + } + /* Already zero-ed by the caller. + else + mctx->bkref_ents = NULL; + mctx->nbkref_ents = 0; + mctx->nsub_tops = 0; */ + mctx->abkref_ents = n; + mctx->max_mb_elem_len = 1; + mctx->asub_tops = n; + return REG_NOERROR; +} + +/* Clean the entries which depend on the current input in MCTX. + This function must be invoked when the matcher changes the start index + of the input, or changes the input string. 
*/ + +static void +internal_function +match_ctx_clean (re_match_context_t *mctx) +{ + int st_idx; + for (st_idx = 0; st_idx < mctx->nsub_tops; ++st_idx) + { + int sl_idx; + re_sub_match_top_t *top = mctx->sub_tops[st_idx]; + for (sl_idx = 0; sl_idx < top->nlasts; ++sl_idx) + { + re_sub_match_last_t *last = top->lasts[sl_idx]; + re_free (last->path.array); + re_free (last); + } + re_free (top->lasts); + if (top->path) + { + re_free (top->path->array); + re_free (top->path); + } + free (top); + } + + mctx->nsub_tops = 0; + mctx->nbkref_ents = 0; +} + +/* Free all the memory associated with MCTX. */ + +static void +internal_function +match_ctx_free (re_match_context_t *mctx) +{ + /* First, free all the memory associated with MCTX->SUB_TOPS. */ + match_ctx_clean (mctx); + re_free (mctx->sub_tops); + re_free (mctx->bkref_ents); +} + +/* Add a new backreference entry to MCTX. + Note that we assume that caller never call this function with duplicate + entry, and call with STR_IDX which isn't smaller than any existing entry. +*/ + +static reg_errcode_t +internal_function +match_ctx_add_entry (re_match_context_t *mctx, int node, int str_idx, int from, + int to) +{ + if (mctx->nbkref_ents >= mctx->abkref_ents) + { + struct re_backref_cache_entry* new_entry; + new_entry = re_realloc (mctx->bkref_ents, struct re_backref_cache_entry, + mctx->abkref_ents * 2); + if (BE (new_entry == NULL, 0)) + { + re_free (mctx->bkref_ents); + return REG_ESPACE; + } + mctx->bkref_ents = new_entry; + memset (mctx->bkref_ents + mctx->nbkref_ents, '\0', + sizeof (struct re_backref_cache_entry) * mctx->abkref_ents); + mctx->abkref_ents *= 2; + } + if (mctx->nbkref_ents > 0 + && mctx->bkref_ents[mctx->nbkref_ents - 1].str_idx == str_idx) + mctx->bkref_ents[mctx->nbkref_ents - 1].more = 1; + + mctx->bkref_ents[mctx->nbkref_ents].node = node; + mctx->bkref_ents[mctx->nbkref_ents].str_idx = str_idx; + mctx->bkref_ents[mctx->nbkref_ents].subexp_from = from; + mctx->bkref_ents[mctx->nbkref_ents].subexp_to = to; + + /* This is a cache that saves negative results of check_dst_limits_calc_pos. + If bit N is clear, means that this entry won't epsilon-transition to + an OP_OPEN_SUBEXP or OP_CLOSE_SUBEXP for the N+1-th subexpression. If + it is set, check_dst_limits_calc_pos_1 will recurse and try to find one + such node. + + A backreference does not epsilon-transition unless it is empty, so set + to all zeros if FROM != TO. */ + mctx->bkref_ents[mctx->nbkref_ents].eps_reachable_subexps_map + = (from == to ? ~0 : 0); + + mctx->bkref_ents[mctx->nbkref_ents++].more = 0; + if (mctx->max_mb_elem_len < to - from) + mctx->max_mb_elem_len = to - from; + return REG_NOERROR; +} + +/* Search for the first entry which has the same str_idx, or -1 if none is + found. Note that MCTX->BKREF_ENTS is already sorted by MCTX->STR_IDX. */ + +static int +internal_function +search_cur_bkref_entry (const re_match_context_t *mctx, int str_idx) +{ + int left, right, mid, last; + last = right = mctx->nbkref_ents; + for (left = 0; left < right;) + { + mid = (left + right) / 2; + if (mctx->bkref_ents[mid].str_idx < str_idx) + left = mid + 1; + else + right = mid; + } + if (left < last && mctx->bkref_ents[left].str_idx == str_idx) + return left; + else + return -1; +} + +/* Register the node NODE, whose type is OP_OPEN_SUBEXP, and which matches + at STR_IDX. 
*/ + +static reg_errcode_t +internal_function +match_ctx_add_subtop (re_match_context_t *mctx, int node, int str_idx) +{ +#ifdef DEBUG + assert (mctx->sub_tops != NULL); + assert (mctx->asub_tops > 0); +#endif + if (BE (mctx->nsub_tops == mctx->asub_tops, 0)) + { + int new_asub_tops = mctx->asub_tops * 2; + re_sub_match_top_t **new_array = re_realloc (mctx->sub_tops, + re_sub_match_top_t *, + new_asub_tops); + if (BE (new_array == NULL, 0)) + return REG_ESPACE; + mctx->sub_tops = new_array; + mctx->asub_tops = new_asub_tops; + } + mctx->sub_tops[mctx->nsub_tops] = calloc (1, sizeof (re_sub_match_top_t)); + if (BE (mctx->sub_tops[mctx->nsub_tops] == NULL, 0)) + return REG_ESPACE; + mctx->sub_tops[mctx->nsub_tops]->node = node; + mctx->sub_tops[mctx->nsub_tops++]->str_idx = str_idx; + return REG_NOERROR; +} + +/* Register the node NODE, whose type is OP_CLOSE_SUBEXP, and which matches + at STR_IDX, whose corresponding OP_OPEN_SUBEXP is SUB_TOP. */ + +static re_sub_match_last_t * +internal_function +match_ctx_add_sublast (re_sub_match_top_t *subtop, int node, int str_idx) +{ + re_sub_match_last_t *new_entry; + if (BE (subtop->nlasts == subtop->alasts, 0)) + { + int new_alasts = 2 * subtop->alasts + 1; + re_sub_match_last_t **new_array = re_realloc (subtop->lasts, + re_sub_match_last_t *, + new_alasts); + if (BE (new_array == NULL, 0)) + return NULL; + subtop->lasts = new_array; + subtop->alasts = new_alasts; + } + new_entry = calloc (1, sizeof (re_sub_match_last_t)); + if (BE (new_entry != NULL, 1)) + { + subtop->lasts[subtop->nlasts] = new_entry; + new_entry->node = node; + new_entry->str_idx = str_idx; + ++subtop->nlasts; + } + return new_entry; +} + +static void +internal_function +sift_ctx_init (re_sift_context_t *sctx, re_dfastate_t **sifted_sts, + re_dfastate_t **limited_sts, int last_node, int last_str_idx) +{ + sctx->sifted_states = sifted_sts; + sctx->limited_states = limited_sts; + sctx->last_node = last_node; + sctx->last_str_idx = last_str_idx; + re_node_set_init_empty (&sctx->limits); +} diff --git a/compat/setenv.c b/compat/setenv.c new file mode 100644 index 0000000000..fc1439a643 --- /dev/null +++ b/compat/setenv.c @@ -0,0 +1,40 @@ +#include "../git-compat-util.h" + +int gitsetenv(const char *name, const char *value, int replace) +{ + int out; + size_t namelen, valuelen; + char *envstr; + + if (!name || strchr(name, '=') || !value) { + errno = EINVAL; + return -1; + } + if (!replace) { + char *oldval = NULL; + oldval = getenv(name); + if (oldval) return 0; + } + + namelen = strlen(name); + valuelen = strlen(value); + envstr = malloc((namelen + valuelen + 2)); + if (!envstr) { + errno = ENOMEM; + return -1; + } + + memcpy(envstr, name, namelen); + envstr[namelen] = '='; + memcpy(envstr + namelen + 1, value, valuelen); + envstr[namelen + valuelen + 1] = 0; + + out = putenv(envstr); + /* putenv(3) makes the argument string part of the environment, + * and changing that string modifies the environment --- which + * means we do not own that storage anymore. Do not free + * envstr. + */ + + return out; +} diff --git a/compat/snprintf.c b/compat/snprintf.c new file mode 100644 index 0000000000..42ea1ac110 --- /dev/null +++ b/compat/snprintf.c @@ -0,0 +1,69 @@ +#include "../git-compat-util.h" + +/* + * The size parameter specifies the available space, i.e. includes + * the trailing NUL byte; but Windows's vsnprintf uses the entire + * buffer and avoids the trailing NUL, should the buffer be exactly + * big enough for the result. 
Defining SNPRINTF_SIZE_CORR to 1 will + * therefore remove 1 byte from the reported buffer size, so we + * always have room for a trailing NUL byte. + */ +#ifndef SNPRINTF_SIZE_CORR +#if defined(WIN32) && (!defined(__GNUC__) || __GNUC__ < 4) +#define SNPRINTF_SIZE_CORR 1 +#else +#define SNPRINTF_SIZE_CORR 0 +#endif +#endif + +#undef vsnprintf +int git_vsnprintf(char *str, size_t maxsize, const char *format, va_list ap) +{ + va_list cp; + char *s; + int ret = -1; + + if (maxsize > 0) { + va_copy(cp, ap); + ret = vsnprintf(str, maxsize-SNPRINTF_SIZE_CORR, format, cp); + va_end(cp); + if (ret == maxsize-1) + ret = -1; + /* Windows does not NUL-terminate if result fills buffer */ + str[maxsize-1] = 0; + } + if (ret != -1) + return ret; + + s = NULL; + if (maxsize < 128) + maxsize = 128; + + while (ret == -1) { + maxsize *= 4; + str = realloc(s, maxsize); + if (! str) + break; + s = str; + va_copy(cp, ap); + ret = vsnprintf(str, maxsize-SNPRINTF_SIZE_CORR, format, cp); + va_end(cp); + if (ret == maxsize-1) + ret = -1; + } + free(s); + return ret; +} + +int git_snprintf(char *str, size_t maxsize, const char *format, ...) +{ + va_list ap; + int ret; + + va_start(ap, format); + ret = git_vsnprintf(str, maxsize, format, ap); + va_end(ap); + + return ret; +} + diff --git a/compat/strcasestr.c b/compat/strcasestr.c new file mode 100644 index 0000000000..26896deca6 --- /dev/null +++ b/compat/strcasestr.c @@ -0,0 +1,22 @@ +#include "../git-compat-util.h" + +char *gitstrcasestr(const char *haystack, const char *needle) +{ + int nlen = strlen(needle); + int hlen = strlen(haystack) - nlen + 1; + int i; + + for (i = 0; i < hlen; i++) { + int j; + for (j = 0; j < nlen; j++) { + unsigned char c1 = haystack[i+j]; + unsigned char c2 = needle[j]; + if (toupper(c1) != toupper(c2)) + goto next; + } + return (char *) haystack + i; + next: + ; + } + return NULL; +} diff --git a/compat/strlcpy.c b/compat/strlcpy.c new file mode 100644 index 0000000000..4024c36030 --- /dev/null +++ b/compat/strlcpy.c @@ -0,0 +1,13 @@ +#include "../git-compat-util.h" + +size_t gitstrlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = strlen(src); + + if (size) { + size_t len = (ret >= size) ? 
size - 1 : ret; + memcpy(dest, src, len); + dest[len] = '\0'; + } + return ret; +} diff --git a/compat/strtoimax.c b/compat/strtoimax.c new file mode 100644 index 0000000000..ac09ed89e7 --- /dev/null +++ b/compat/strtoimax.c @@ -0,0 +1,10 @@ +#include "../git-compat-util.h" + +intmax_t gitstrtoimax (const char *nptr, char **endptr, int base) +{ +#if defined(NO_STRTOULL) + return strtol(nptr, endptr, base); +#else + return strtoll(nptr, endptr, base); +#endif +} diff --git a/compat/strtoumax.c b/compat/strtoumax.c new file mode 100644 index 0000000000..5541353a77 --- /dev/null +++ b/compat/strtoumax.c @@ -0,0 +1,10 @@ +#include "../git-compat-util.h" + +uintmax_t gitstrtoumax (const char *nptr, char **endptr, int base) +{ +#if defined(NO_STRTOULL) + return strtoul(nptr, endptr, base); +#else + return strtoull(nptr, endptr, base); +#endif +} diff --git a/compat/terminal.c b/compat/terminal.c new file mode 100644 index 0000000000..313897d581 --- /dev/null +++ b/compat/terminal.c @@ -0,0 +1,147 @@ +#include "git-compat-util.h" +#include "compat/terminal.h" +#include "sigchain.h" +#include "strbuf.h" + +#if defined(HAVE_DEV_TTY) || defined(GIT_WINDOWS_NATIVE) + +static void restore_term(void); + +static void restore_term_on_signal(int sig) +{ + restore_term(); + sigchain_pop(sig); + raise(sig); +} + +#ifdef HAVE_DEV_TTY + +#define INPUT_PATH "/dev/tty" +#define OUTPUT_PATH "/dev/tty" + +static int term_fd = -1; +static struct termios old_term; + +static void restore_term(void) +{ + if (term_fd < 0) + return; + + tcsetattr(term_fd, TCSAFLUSH, &old_term); + close(term_fd); + term_fd = -1; +} + +static int disable_echo(void) +{ + struct termios t; + + term_fd = open("/dev/tty", O_RDWR); + if (tcgetattr(term_fd, &t) < 0) + goto error; + + old_term = t; + sigchain_push_common(restore_term_on_signal); + + t.c_lflag &= ~ECHO; + if (!tcsetattr(term_fd, TCSAFLUSH, &t)) + return 0; + +error: + close(term_fd); + term_fd = -1; + return -1; +} + +#elif defined(GIT_WINDOWS_NATIVE) + +#define INPUT_PATH "CONIN$" +#define OUTPUT_PATH "CONOUT$" +#define FORCE_TEXT "t" + +static HANDLE hconin = INVALID_HANDLE_VALUE; +static DWORD cmode; + +static void restore_term(void) +{ + if (hconin == INVALID_HANDLE_VALUE) + return; + + SetConsoleMode(hconin, cmode); + CloseHandle(hconin); + hconin = INVALID_HANDLE_VALUE; +} + +static int disable_echo(void) +{ + hconin = CreateFile("CONIN$", GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ, NULL, OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, NULL); + if (hconin == INVALID_HANDLE_VALUE) + return -1; + + GetConsoleMode(hconin, &cmode); + sigchain_push_common(restore_term_on_signal); + if (!SetConsoleMode(hconin, cmode & (~ENABLE_ECHO_INPUT))) { + CloseHandle(hconin); + hconin = INVALID_HANDLE_VALUE; + return -1; + } + + return 0; +} + +#endif + +#ifndef FORCE_TEXT +#define FORCE_TEXT +#endif + +char *git_terminal_prompt(const char *prompt, int echo) +{ + static struct strbuf buf = STRBUF_INIT; + int r; + FILE *input_fh, *output_fh; + + input_fh = fopen(INPUT_PATH, "r" FORCE_TEXT); + if (!input_fh) + return NULL; + + output_fh = fopen(OUTPUT_PATH, "w" FORCE_TEXT); + if (!output_fh) { + fclose(input_fh); + return NULL; + } + + if (!echo && disable_echo()) { + fclose(input_fh); + fclose(output_fh); + return NULL; + } + + fputs(prompt, output_fh); + fflush(output_fh); + + r = strbuf_getline(&buf, input_fh, '\n'); + if (!echo) { + putc('\n', output_fh); + fflush(output_fh); + } + + restore_term(); + fclose(input_fh); + fclose(output_fh); + + if (r == EOF) + return NULL; + return buf.buf; 
+} + +#else + +char *git_terminal_prompt(const char *prompt, int echo) +{ + return getpass(prompt); +} + +#endif diff --git a/compat/terminal.h b/compat/terminal.h new file mode 100644 index 0000000000..97db7cd69d --- /dev/null +++ b/compat/terminal.h @@ -0,0 +1,6 @@ +#ifndef COMPAT_TERMINAL_H +#define COMPAT_TERMINAL_H + +char *git_terminal_prompt(const char *prompt, int echo); + +#endif /* COMPAT_TERMINAL_H */ diff --git a/compat/unsetenv.c b/compat/unsetenv.c new file mode 100644 index 0000000000..bf5fd7063b --- /dev/null +++ b/compat/unsetenv.c @@ -0,0 +1,27 @@ +#include "../git-compat-util.h" + +void gitunsetenv (const char *name) +{ +#if !defined(__MINGW32__) + extern char **environ; +#endif + int src, dst; + size_t nmln; + + nmln = strlen(name); + + for (src = dst = 0; environ[src]; ++src) { + size_t enln; + enln = strlen(environ[src]); + if (enln > nmln) { + /* might match, and can test for '=' safely */ + if (0 == strncmp (environ[src], name, nmln) + && '=' == environ[src][nmln]) + /* matches, so skip */ + continue; + } + environ[dst] = environ[src]; + ++dst; + } + environ[dst] = NULL; +} diff --git a/compat/vcbuild/README b/compat/vcbuild/README new file mode 100644 index 0000000000..df8a6574c9 --- /dev/null +++ b/compat/vcbuild/README @@ -0,0 +1,50 @@ +Steps to Build Git with VS2008 + +1. You need the build environment, which contains the Git dependencies + to be able to compile, link and run Git with MSVC. + + You can either use the binary repository: + + WWW: http://repo.or.cz/w/msvcgit.git + Git: git clone git://repo.or.cz/msvcgit.git + Zip: http://repo.or.cz/w/msvcgit.git?a=snapshot;h=master;sf=zip + + and call the setup_32bit_env.cmd batch script before compiling Git + (see the repo/package README for details), or the source repository: + + WWW: http://repo.or.cz/w/gitbuild.git + Git: git clone git://repo.or.cz/gitbuild.git + Zip: (None, as it's a project with submodules) + + and build the support libs as instructed in that repo/package. + +2. Ensure you have the msysgit environment in your path, so you have + GNU Make, bash and perl available. + + WWW: http://repo.or.cz/w/msysgit.git + Git: git clone git://repo.or.cz/msysgit.git + Zip: http://repo.or.cz/w/msysgit.git?a=snapshot;h=master;sf=zip + + This environment is also needed when you use the resulting + executables, since Git might need to run scripts which are part of + the git operations. + +3. Inside Git's directory, run the command: + make common-cmds.h + to generate the common-cmds.h file needed to compile git. + +4. Then either build Git with the GNU Make Makefile in the Git project's + root + make MSVC=1 + or generate Visual Studio solution/projects (.sln/.vcproj) with the + command + perl contrib/buildsystems/generate -g Vcproj + and open and build the solution with the IDE + devenv git.sln /useenv + or build with the IDE build engine directly from the command line + devenv git.sln /useenv /build "Release|Win32" + The /useenv option is required, so Visual Studio picks up the + environment variables for the support libraries required to build + Git, which you set up in step 1. + +Done!
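A brief usage sketch of the compat/terminal.h API above. This is a hypothetical caller, not part of the diff; it assumes it is compiled inside git, where die_errno() and xstrdup() from git-compat-util.h are available. Note that git_terminal_prompt() returns a pointer into a static strbuf, so the result must be duplicated before the next prompt overwrites it:

	#include "git-compat-util.h"	/* assumed: provides die_errno(), xstrdup() */
	#include "compat/terminal.h"

	static char *read_password(void)
	{
		/* echo == 0: terminal echo is disabled while the user types */
		char *pass = git_terminal_prompt("Password: ", 0);
		if (!pass)
			die_errno("could not read password");
		/* copy out of git_terminal_prompt's static buffer */
		return xstrdup(pass);
	}

On POSIX systems this goes through /dev/tty and tcsetattr(); on Windows it uses CONIN$/CONOUT$ and SetConsoleMode(), as implemented in compat/terminal.c above.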
diff --git a/compat/vcbuild/include/sys/param.h b/compat/vcbuild/include/sys/param.h new file mode 100644 index 0000000000..0d8552a2c6 --- /dev/null +++ b/compat/vcbuild/include/sys/param.h @@ -0,0 +1 @@ +/* Intentionally empty file to support building git with MSVC */ diff --git a/compat/vcbuild/include/sys/time.h b/compat/vcbuild/include/sys/time.h new file mode 100644 index 0000000000..0d8552a2c6 --- /dev/null +++ b/compat/vcbuild/include/sys/time.h @@ -0,0 +1 @@ +/* Intentionally empty file to support building git with MSVC */ diff --git a/compat/vcbuild/include/sys/utime.h b/compat/vcbuild/include/sys/utime.h new file mode 100644 index 0000000000..582589c70a --- /dev/null +++ b/compat/vcbuild/include/sys/utime.h @@ -0,0 +1,34 @@ +#ifndef _UTIME_H_ +#define _UTIME_H_ +/* + * UTIME.H + * This file has no copyright assigned and is placed in the Public Domain. + * This file is a part of the mingw-runtime package. + * + * The mingw-runtime package and its code is distributed in the hope that it + * will be useful but WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESSED OR + * IMPLIED ARE HEREBY DISCLAIMED. This includes but is not limited to + * warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * You are free to use this package and its code without limitation. + */ + +/* + * Structure used by _utime function. + */ +struct _utimbuf +{ + time_t actime; /* Access time */ + time_t modtime; /* Modification time */ +}; + +#ifndef _NO_OLDNAMES +/* NOTE: Must be the same as _utimbuf above. */ +struct utimbuf +{ + time_t actime; + time_t modtime; +}; +#endif /* Not _NO_OLDNAMES */ + +#endif diff --git a/compat/vcbuild/include/unistd.h b/compat/vcbuild/include/unistd.h new file mode 100644 index 0000000000..c65c2cd566 --- /dev/null +++ b/compat/vcbuild/include/unistd.h @@ -0,0 +1,99 @@ +#ifndef _UNISTD_ +#define _UNISTD_ + +/* Win32 define for porting git*/ + +#ifndef _MODE_T_ +#define _MODE_T_ +typedef unsigned short _mode_t; + +#ifndef _NO_OLDNAMES +typedef _mode_t mode_t; +#endif +#endif /* Not _MODE_T_ */ + +#ifndef _SSIZE_T_ +#define _SSIZE_T_ +typedef long _ssize_t; + +#ifndef _OFF_T_ +#define _OFF_T_ +typedef long _off_t; + +#ifndef _NO_OLDNAMES +typedef _off_t off_t; +#endif +#endif /* Not _OFF_T_ */ + + +#ifndef _NO_OLDNAMES +typedef _ssize_t ssize_t; +#endif +#endif /* Not _SSIZE_T_ */ + +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef short int16_t; +typedef unsigned short uint16_t; +typedef int int32_t; +typedef unsigned uint32_t; +typedef long long int64_t; +typedef unsigned long long uint64_t; + +typedef long long intmax_t; +typedef unsigned long long uintmax_t; + +typedef int64_t off64_t; + +#define INTMAX_MIN _I64_MIN +#define INTMAX_MAX _I64_MAX +#define UINTMAX_MAX _UI64_MAX + +#define UINT32_MAX 0xffffffff /* 4294967295U */ + +#define STDIN_FILENO 0 +#define STDOUT_FILENO 1 +#define STDERR_FILENO 2 + +/* Some defines for _access nAccessMode (MS doesn't define them, but + * it doesn't seem to hurt to add them). */ +#define F_OK 0 /* Check for file existence */ +/* Well maybe it does hurt. On newer versions of MSVCRT, an access mode + of 1 causes invalid parameter error. */ +#define X_OK 0 /* MS access() doesn't check for execute permission. */ +#define W_OK 2 /* Check for write permission */ +#define R_OK 4 /* Check for read permission */ + +#define _S_IFIFO 0x1000 /* FIFO */ +#define _S_IFCHR 0x2000 /* Character */ +#define _S_IFBLK 0x3000 /* Block: Is this ever set under w32? 
*/ +#define _S_IFDIR 0x4000 /* Directory */ +#define _S_IFREG 0x8000 /* Regular */ + +#define _S_IFMT 0xF000 /* File type mask */ + +#define _S_IXUSR _S_IEXEC +#define _S_IWUSR _S_IWRITE +#define _S_IRUSR _S_IREAD +#define _S_ISDIR(m) (((m) & _S_IFMT) == _S_IFDIR) + +#define S_IFIFO _S_IFIFO +#define S_IFCHR _S_IFCHR +#define S_IFBLK _S_IFBLK +#define S_IFDIR _S_IFDIR +#define S_IFREG _S_IFREG +#define S_IFMT _S_IFMT +#define S_IEXEC _S_IEXEC +#define S_IWRITE _S_IWRITE +#define S_IREAD _S_IREAD +#define S_IRWXU _S_IRWXU +#define S_IXUSR _S_IXUSR +#define S_IWUSR _S_IWUSR +#define S_IRUSR _S_IRUSR + + +#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) +#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO) + +#endif diff --git a/compat/vcbuild/include/utime.h b/compat/vcbuild/include/utime.h new file mode 100644 index 0000000000..8285f38fde --- /dev/null +++ b/compat/vcbuild/include/utime.h @@ -0,0 +1 @@ +#include <sys/utime.h> diff --git a/compat/vcbuild/scripts/clink.pl b/compat/vcbuild/scripts/clink.pl new file mode 100755 index 0000000000..a87d0da512 --- /dev/null +++ b/compat/vcbuild/scripts/clink.pl @@ -0,0 +1,54 @@ +#!/usr/bin/perl -w +###################################################################### +# Compiles or links files +# +# This is a wrapper to facilitate the compilation of Git with MSVC +# using GNU Make as the build system. So, instead of manipulating the +# Makefile into something nasty, just to support non-space arguments +# etc, we use this wrapper to fix the command line options +# +# Copyright (C) 2009 Marius Storm-Olsen <mstormo@gmail.com> +###################################################################### +use strict; +my @args = (); +my @cflags = (); +my $is_linking = 0; +while (@ARGV) { + my $arg = shift @ARGV; + if ("$arg" =~ /^-[DIMGO]/) { + push(@cflags, $arg); + } elsif ("$arg" eq "-o") { + my $file_out = shift @ARGV; + if ("$file_out" =~ /exe$/) { + $is_linking = 1; + push(@args, "-OUT:$file_out"); + } else { + push(@args, "-Fo$file_out"); + } + } elsif ("$arg" eq "-lz") { + push(@args, "zlib.lib"); + } elsif ("$arg" eq "-liconv") { + push(@args, "iconv.lib"); + } elsif ("$arg" eq "-lcrypto") { + push(@args, "libeay32.lib"); + } elsif ("$arg" eq "-lssl") { + push(@args, "ssleay32.lib"); + } elsif ("$arg" eq "-lcurl") { + push(@args, "libcurl.lib"); + } elsif ("$arg" =~ /^-L/ && "$arg" ne "-LTCG") { + $arg =~ s/^-L/-LIBPATH:/; + push(@args, $arg); + } elsif ("$arg" =~ /^-R/) { + # eat + } else { + push(@args, $arg); + } +} +if ($is_linking) { + unshift(@args, "link.exe"); +} else { + unshift(@args, "cl.exe"); + push(@args, @cflags); +} +#printf("**** @args\n"); +exit (system(@args) != 0); diff --git a/compat/vcbuild/scripts/lib.pl b/compat/vcbuild/scripts/lib.pl new file mode 100755 index 0000000000..d8054e469f --- /dev/null +++ b/compat/vcbuild/scripts/lib.pl @@ -0,0 +1,26 @@ +#!/usr/bin/perl -w +###################################################################### +# Libifies files on Windows +# +# This is a wrapper to facilitate the compilation of Git with MSVC +# using GNU Make as the build system. 
So, instead of manipulating the +# Makefile into something nasty, just to support non-space arguments +# etc, we use this wrapper to fix the command line options +# +# Copyright (C) 2009 Marius Storm-Olsen <mstormo@gmail.com> +###################################################################### +use strict; +my @args = (); +while (@ARGV) { + my $arg = shift @ARGV; + if ("$arg" eq "rcs") { + # Consume the rcs option + } elsif ("$arg" =~ /\.a$/) { + push(@args, "-OUT:$arg"); + } else { + push(@args, $arg); + } +} +unshift(@args, "lib.exe"); +# printf("**** @args\n"); +exit (system(@args) != 0); diff --git a/compat/win32.h b/compat/win32.h new file mode 100644 index 0000000000..a97e880757 --- /dev/null +++ b/compat/win32.h @@ -0,0 +1,41 @@ +#ifndef WIN32_H +#define WIN32_H + +/* common Win32 functions for MinGW and Cygwin */ +#ifndef GIT_WINDOWS_NATIVE /* Not defined for Cygwin */ +#include <windows.h> +#endif + +static inline int file_attr_to_st_mode (DWORD attr) +{ + int fMode = S_IREAD; + if (attr & FILE_ATTRIBUTE_DIRECTORY) + fMode |= S_IFDIR; + else + fMode |= S_IFREG; + if (!(attr & FILE_ATTRIBUTE_READONLY)) + fMode |= S_IWRITE; + return fMode; +} + +static inline int get_file_attr(const char *fname, WIN32_FILE_ATTRIBUTE_DATA *fdata) +{ + if (GetFileAttributesExA(fname, GetFileExInfoStandard, fdata)) + return 0; + + switch (GetLastError()) { + case ERROR_ACCESS_DENIED: + case ERROR_SHARING_VIOLATION: + case ERROR_LOCK_VIOLATION: + case ERROR_SHARING_BUFFER_EXCEEDED: + return EACCES; + case ERROR_BUFFER_OVERFLOW: + return ENAMETOOLONG; + case ERROR_NOT_ENOUGH_MEMORY: + return ENOMEM; + default: + return ENOENT; + } +} + +#endif diff --git a/compat/win32/alloca.h b/compat/win32/alloca.h new file mode 100644 index 0000000000..c0d7985b7e --- /dev/null +++ b/compat/win32/alloca.h @@ -0,0 +1 @@ +#include <malloc.h> diff --git a/compat/win32/dirent.c b/compat/win32/dirent.c new file mode 100644 index 0000000000..52420ec7d4 --- /dev/null +++ b/compat/win32/dirent.c @@ -0,0 +1,92 @@ +#include "../../git-compat-util.h" + +struct DIR { + struct dirent dd_dir; /* includes d_type */ + HANDLE dd_handle; /* FindFirstFile handle */ + int dd_stat; /* 0-based index */ +}; + +static inline void finddata2dirent(struct dirent *ent, WIN32_FIND_DATAW *fdata) +{ + /* convert UTF-16 name to UTF-8 */ + xwcstoutf(ent->d_name, fdata->cFileName, sizeof(ent->d_name)); + + /* Set file type, based on WIN32_FIND_DATA */ + if (fdata->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) + ent->d_type = DT_DIR; + else + ent->d_type = DT_REG; +} + +DIR *opendir(const char *name) +{ + wchar_t pattern[MAX_PATH + 2]; /* + 2 for '/' '*' */ + WIN32_FIND_DATAW fdata; + HANDLE h; + int len; + DIR *dir; + + /* convert name to UTF-16 and check length < MAX_PATH */ + if ((len = xutftowcs_path(pattern, name)) < 0) + return NULL; + + /* append optional '/' and wildcard '*' */ + if (len && !is_dir_sep(pattern[len - 1])) + pattern[len++] = '/'; + pattern[len++] = '*'; + pattern[len] = 0; + + /* open find handle */ + h = FindFirstFileW(pattern, &fdata); + if (h == INVALID_HANDLE_VALUE) { + DWORD err = GetLastError(); + errno = (err == ERROR_DIRECTORY) ? 
ENOTDIR : err_win_to_posix(err); + return NULL; + } + + /* initialize DIR structure and copy first dir entry */ + dir = xmalloc(sizeof(DIR)); + dir->dd_handle = h; + dir->dd_stat = 0; + finddata2dirent(&dir->dd_dir, &fdata); + return dir; +} + +struct dirent *readdir(DIR *dir) +{ + if (!dir) { + errno = EBADF; /* No set_errno for mingw */ + return NULL; + } + + /* if first entry, dirent has already been set up by opendir */ + if (dir->dd_stat) { + /* get next entry and convert from WIN32_FIND_DATA to dirent */ + WIN32_FIND_DATAW fdata; + if (FindNextFileW(dir->dd_handle, &fdata)) { + finddata2dirent(&dir->dd_dir, &fdata); + } else { + DWORD lasterr = GetLastError(); + /* POSIX says you shouldn't set errno when readdir can't + find any more files; so, if another error we leave it set. */ + if (lasterr != ERROR_NO_MORE_FILES) + errno = err_win_to_posix(lasterr); + return NULL; + } + } + + ++dir->dd_stat; + return &dir->dd_dir; +} + +int closedir(DIR *dir) +{ + if (!dir) { + errno = EBADF; + return -1; + } + + FindClose(dir->dd_handle); + free(dir); + return 0; +} diff --git a/compat/win32/dirent.h b/compat/win32/dirent.h new file mode 100644 index 0000000000..058207e4bf --- /dev/null +++ b/compat/win32/dirent.h @@ -0,0 +1,20 @@ +#ifndef DIRENT_H +#define DIRENT_H + +typedef struct DIR DIR; + +#define DT_UNKNOWN 0 +#define DT_DIR 1 +#define DT_REG 2 +#define DT_LNK 3 + +struct dirent { + unsigned char d_type; /* file type to prevent lstat after readdir */ + char d_name[MAX_PATH * 3]; /* file name (* 3 for UTF-8 conversion) */ +}; + +DIR *opendir(const char *dirname); +struct dirent *readdir(DIR *dir); +int closedir(DIR *dir); + +#endif /* DIRENT_H */ diff --git a/compat/win32/pthread.c b/compat/win32/pthread.c new file mode 100644 index 0000000000..e18f5c6e2e --- /dev/null +++ b/compat/win32/pthread.c @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2009 Andrzej K. Haczewski <ahaczewski@gmail.com> + * + * DISCLAIMER: The implementation is Git-specific, it is subset of original + * Pthreads API, without lots of other features that Git doesn't use. + * Git also makes sure that the passed arguments are valid, so there's + * no need for double-checking. 
+ */ + +#include "../../git-compat-util.h" +#include "pthread.h" + +#include <errno.h> +#include <limits.h> + +static unsigned __stdcall win32_start_routine(void *arg) +{ + pthread_t *thread = arg; + thread->tid = GetCurrentThreadId(); + thread->arg = thread->start_routine(thread->arg); + return 0; +} + +int pthread_create(pthread_t *thread, const void *unused, + void *(*start_routine)(void*), void *arg) +{ + thread->arg = arg; + thread->start_routine = start_routine; + thread->handle = (HANDLE) + _beginthreadex(NULL, 0, win32_start_routine, thread, 0, NULL); + + if (!thread->handle) + return errno; + else + return 0; +} + +int win32_pthread_join(pthread_t *thread, void **value_ptr) +{ + DWORD result = WaitForSingleObject(thread->handle, INFINITE); + switch (result) { + case WAIT_OBJECT_0: + if (value_ptr) + *value_ptr = thread->arg; + return 0; + case WAIT_ABANDONED: + return EINVAL; + default: + return err_win_to_posix(GetLastError()); + } +} + +pthread_t pthread_self(void) +{ + pthread_t t = { NULL }; + t.tid = GetCurrentThreadId(); + return t; +} + +int pthread_cond_init(pthread_cond_t *cond, const void *unused) +{ + cond->waiters = 0; + cond->was_broadcast = 0; + InitializeCriticalSection(&cond->waiters_lock); + + cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL); + if (!cond->sema) + die("CreateSemaphore() failed"); + + cond->continue_broadcast = CreateEvent(NULL, /* security */ + FALSE, /* auto-reset */ + FALSE, /* not signaled */ + NULL); /* name */ + if (!cond->continue_broadcast) + die("CreateEvent() failed"); + + return 0; +} + +int pthread_cond_destroy(pthread_cond_t *cond) +{ + CloseHandle(cond->sema); + CloseHandle(cond->continue_broadcast); + DeleteCriticalSection(&cond->waiters_lock); + return 0; +} + +int pthread_cond_wait(pthread_cond_t *cond, CRITICAL_SECTION *mutex) +{ + int last_waiter; + + EnterCriticalSection(&cond->waiters_lock); + cond->waiters++; + LeaveCriticalSection(&cond->waiters_lock); + + /* + * Unlock external mutex and wait for signal. + * NOTE: we've held mutex locked long enough to increment + * waiters count above, so there's no problem with + * leaving mutex unlocked before we wait on semaphore. + */ + LeaveCriticalSection(mutex); + + /* let's wait - ignore return value */ + WaitForSingleObject(cond->sema, INFINITE); + + /* + * Decrease waiters count. If we are the last waiter, then we must + * notify the broadcasting thread that it can continue. + * But if we continued due to cond_signal, we do not have to do that + * because the signaling thread knows that only one waiter continued. + */ + EnterCriticalSection(&cond->waiters_lock); + cond->waiters--; + last_waiter = cond->was_broadcast && cond->waiters == 0; + LeaveCriticalSection(&cond->waiters_lock); + + if (last_waiter) { + /* + * cond_broadcast was issued while mutex was held. This means + * that all other waiters have continued, but are contending + * for the mutex at the end of this function because the + * broadcasting thread did not leave cond_broadcast, yet. + * (This is so that it can be sure that each waiter has + * consumed exactly one slice of the semaphor.) + * The last waiter must tell the broadcasting thread that it + * can go on. + */ + SetEvent(cond->continue_broadcast); + /* + * Now we go on to contend with all other waiters for + * the mutex. Auf in den Kampf! 
+ */ + } + /* lock external mutex again */ + EnterCriticalSection(mutex); + + return 0; +} + +/* + * IMPORTANT: This implementation requires that pthread_cond_signal + * is called while the mutex is held that is used in the corresponding + * pthread_cond_wait calls! + */ +int pthread_cond_signal(pthread_cond_t *cond) +{ + int have_waiters; + + EnterCriticalSection(&cond->waiters_lock); + have_waiters = cond->waiters > 0; + LeaveCriticalSection(&cond->waiters_lock); + + /* + * Signal only when there are waiters + */ + if (have_waiters) + return ReleaseSemaphore(cond->sema, 1, NULL) ? + 0 : err_win_to_posix(GetLastError()); + else + return 0; +} + +/* + * DOUBLY IMPORTANT: This implementation requires that pthread_cond_broadcast + * is called while the mutex is held that is used in the corresponding + * pthread_cond_wait calls! + */ +int pthread_cond_broadcast(pthread_cond_t *cond) +{ + EnterCriticalSection(&cond->waiters_lock); + + if ((cond->was_broadcast = cond->waiters > 0)) { + /* wake up all waiters */ + ReleaseSemaphore(cond->sema, cond->waiters, NULL); + LeaveCriticalSection(&cond->waiters_lock); + /* + * At this point all waiters continue. Each one takes its + * slice of the semaphor. Now it's our turn to wait: Since + * the external mutex is held, no thread can leave cond_wait, + * yet. For this reason, we can be sure that no thread gets + * a chance to eat *more* than one slice. OTOH, it means + * that the last waiter must send us a wake-up. + */ + WaitForSingleObject(cond->continue_broadcast, INFINITE); + /* + * Since the external mutex is held, no thread can enter + * cond_wait, and, hence, it is safe to reset this flag + * without cond->waiters_lock held. + */ + cond->was_broadcast = 0; + } else { + LeaveCriticalSection(&cond->waiters_lock); + } + return 0; +} diff --git a/compat/win32/pthread.h b/compat/win32/pthread.h new file mode 100644 index 0000000000..8ad187344f --- /dev/null +++ b/compat/win32/pthread.h @@ -0,0 +1,104 @@ +/* + * Header used to adapt pthread-based POSIX code to Windows API threads. + * + * Copyright (C) 2009 Andrzej K. Haczewski <ahaczewski@gmail.com> + */ + +#ifndef PTHREAD_H +#define PTHREAD_H + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif + +#include <windows.h> + +/* + * Defines that adapt Windows API threads to pthreads API + */ +#define pthread_mutex_t CRITICAL_SECTION + +#define pthread_mutex_init(a,b) (InitializeCriticalSection((a)), 0) +#define pthread_mutex_destroy(a) DeleteCriticalSection((a)) +#define pthread_mutex_lock EnterCriticalSection +#define pthread_mutex_unlock LeaveCriticalSection + +typedef int pthread_mutexattr_t; +#define pthread_mutexattr_init(a) (*(a) = 0) +#define pthread_mutexattr_destroy(a) do {} while (0) +#define pthread_mutexattr_settype(a, t) 0 +#define PTHREAD_MUTEX_RECURSIVE 0 + +/* + * Implement simple condition variable for Windows threads, based on ACE + * implementation. 
+ * + * See original implementation: http://bit.ly/1vkDjo + * ACE homepage: http://www.cse.wustl.edu/~schmidt/ACE.html + * See also: http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ +typedef struct { + LONG waiters; + int was_broadcast; + CRITICAL_SECTION waiters_lock; + HANDLE sema; + HANDLE continue_broadcast; +} pthread_cond_t; + +extern int pthread_cond_init(pthread_cond_t *cond, const void *unused); +extern int pthread_cond_destroy(pthread_cond_t *cond); +extern int pthread_cond_wait(pthread_cond_t *cond, CRITICAL_SECTION *mutex); +extern int pthread_cond_signal(pthread_cond_t *cond); +extern int pthread_cond_broadcast(pthread_cond_t *cond); + +/* + * Simple thread creation implementation using pthread API + */ +typedef struct { + HANDLE handle; + void *(*start_routine)(void*); + void *arg; + DWORD tid; +} pthread_t; + +extern int pthread_create(pthread_t *thread, const void *unused, + void *(*start_routine)(void*), void *arg); + +/* + * To avoid the need of copying a struct, we use small macro wrapper to pass + * pointer to win32_pthread_join instead. + */ +#define pthread_join(a, b) win32_pthread_join(&(a), (b)) + +extern int win32_pthread_join(pthread_t *thread, void **value_ptr); + +#define pthread_equal(t1, t2) ((t1).tid == (t2).tid) +extern pthread_t pthread_self(void); + +static inline int pthread_exit(void *ret) +{ + ExitThread((DWORD)ret); +} + +typedef DWORD pthread_key_t; +static inline int pthread_key_create(pthread_key_t *keyp, void (*destructor)(void *value)) +{ + return (*keyp = TlsAlloc()) == TLS_OUT_OF_INDEXES ? EAGAIN : 0; +} + +static inline int pthread_key_delete(pthread_key_t key) +{ + return TlsFree(key) ? 0 : EINVAL; +} + +static inline int pthread_setspecific(pthread_key_t key, const void *value) +{ + return TlsSetValue(key, (void *)value) ? 0 : EINVAL; +} + +static inline void *pthread_getspecific(pthread_key_t key) +{ + return TlsGetValue(key); +} + +#endif /* PTHREAD_H */ diff --git a/compat/win32/syslog.c b/compat/win32/syslog.c new file mode 100644 index 0000000000..d015e436d5 --- /dev/null +++ b/compat/win32/syslog.c @@ -0,0 +1,78 @@ +#include "../../git-compat-util.h" + +static HANDLE ms_eventlog; + +void openlog(const char *ident, int logopt, int facility) +{ + if (ms_eventlog) + return; + + ms_eventlog = RegisterEventSourceA(NULL, ident); + + if (!ms_eventlog) + warning("RegisterEventSource() failed: %lu", GetLastError()); +} + +void syslog(int priority, const char *fmt, ...) 
+{ + WORD logtype; + char *str, *pos; + int str_len; + va_list ap; + + if (!ms_eventlog) + return; + + va_start(ap, fmt); + str_len = vsnprintf(NULL, 0, fmt, ap); + va_end(ap); + + if (str_len < 0) { + warning("vsnprintf failed: '%s'", strerror(errno)); + return; + } + + str = malloc(str_len + 1); + if (!str) { + warning("malloc failed: '%s'", strerror(errno)); + return; + } + + va_start(ap, fmt); + vsnprintf(str, str_len + 1, fmt, ap); + va_end(ap); + + while ((pos = strstr(str, "%1")) != NULL) { + str = realloc(str, ++str_len + 1); + if (!str) { + warning("realloc failed: '%s'", strerror(errno)); + return; + } + memmove(pos + 2, pos + 1, strlen(pos)); + pos[1] = ' '; + } + + switch (priority) { + case LOG_EMERG: + case LOG_ALERT: + case LOG_CRIT: + case LOG_ERR: + logtype = EVENTLOG_ERROR_TYPE; + break; + + case LOG_WARNING: + logtype = EVENTLOG_WARNING_TYPE; + break; + + case LOG_NOTICE: + case LOG_INFO: + case LOG_DEBUG: + default: + logtype = EVENTLOG_INFORMATION_TYPE; + break; + } + + ReportEventA(ms_eventlog, logtype, 0, 0, NULL, 1, 0, + (const char **)&str, NULL); + free(str); +} diff --git a/compat/win32/syslog.h b/compat/win32/syslog.h new file mode 100644 index 0000000000..70daa7c08b --- /dev/null +++ b/compat/win32/syslog.h @@ -0,0 +1,20 @@ +#ifndef SYSLOG_H +#define SYSLOG_H + +#define LOG_PID 0x01 + +#define LOG_EMERG 0 +#define LOG_ALERT 1 +#define LOG_CRIT 2 +#define LOG_ERR 3 +#define LOG_WARNING 4 +#define LOG_NOTICE 5 +#define LOG_INFO 6 +#define LOG_DEBUG 7 + +#define LOG_DAEMON (3<<3) + +void openlog(const char *ident, int logopt, int facility); +void syslog(int priority, const char *fmt, ...); + +#endif /* SYSLOG_H */ diff --git a/compat/win32mmap.c b/compat/win32mmap.c new file mode 100644 index 0000000000..80a8c9af4f --- /dev/null +++ b/compat/win32mmap.c @@ -0,0 +1,41 @@ +#include "../git-compat-util.h" + +void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset) +{ + HANDLE hmap; + void *temp; + off_t len; + struct stat st; + uint64_t o = offset; + uint32_t l = o & 0xFFFFFFFF; + uint32_t h = (o >> 32) & 0xFFFFFFFF; + + if (!fstat(fd, &st)) + len = st.st_size; + else + die("mmap: could not determine filesize"); + + if ((length + offset) > len) + length = xsize_t(len - offset); + + if (!(flags & MAP_PRIVATE)) + die("Invalid usage of mmap when built with USE_WIN32_MMAP"); + + hmap = CreateFileMapping((HANDLE)_get_osfhandle(fd), NULL, + PAGE_WRITECOPY, 0, 0, NULL); + + if (!hmap) + return MAP_FAILED; + + temp = MapViewOfFileEx(hmap, FILE_MAP_COPY, h, l, length, start); + + if (!CloseHandle(hmap)) + warning("unable to close file mapping handle"); + + return temp ? temp : MAP_FAILED; +} + +int git_munmap(void *start, size_t length) +{ + return !UnmapViewOfFile(start); +} diff --git a/compat/winansi.c b/compat/winansi.c new file mode 100644 index 0000000000..efc5bb3a4b --- /dev/null +++ b/compat/winansi.c @@ -0,0 +1,579 @@ +/* + * Copyright 2008 Peter Harris <git@peter.is-a-geek.org> + */ + +#undef NOGDI +#include "../git-compat-util.h" +#include <wingdi.h> +#include <winreg.h> + +/* + ANSI codes used by git: m, K + + This file is git-specific. Therefore, this file does not attempt + to implement any codes that are not used by git. 
+*/ + +static HANDLE console; +static WORD plain_attr; +static WORD attr; +static int negative; +static int non_ascii_used = 0; +static HANDLE hthread, hread, hwrite; +static HANDLE hconsole1, hconsole2; + +#ifdef __MINGW32__ +typedef struct _CONSOLE_FONT_INFOEX { + ULONG cbSize; + DWORD nFont; + COORD dwFontSize; + UINT FontFamily; + UINT FontWeight; + WCHAR FaceName[LF_FACESIZE]; +} CONSOLE_FONT_INFOEX, *PCONSOLE_FONT_INFOEX; +#endif + +typedef BOOL (WINAPI *PGETCURRENTCONSOLEFONTEX)(HANDLE, BOOL, + PCONSOLE_FONT_INFOEX); + +static void warn_if_raster_font(void) +{ + DWORD fontFamily = 0; + PGETCURRENTCONSOLEFONTEX pGetCurrentConsoleFontEx; + + /* don't bother if output was ascii only */ + if (!non_ascii_used) + return; + + /* GetCurrentConsoleFontEx is available since Vista */ + pGetCurrentConsoleFontEx = (PGETCURRENTCONSOLEFONTEX) GetProcAddress( + GetModuleHandle("kernel32.dll"), + "GetCurrentConsoleFontEx"); + if (pGetCurrentConsoleFontEx) { + CONSOLE_FONT_INFOEX cfi; + cfi.cbSize = sizeof(cfi); + if (pGetCurrentConsoleFontEx(console, 0, &cfi)) + fontFamily = cfi.FontFamily; + } else { + /* pre-Vista: check default console font in registry */ + HKEY hkey; + if (ERROR_SUCCESS == RegOpenKeyExA(HKEY_CURRENT_USER, "Console", + 0, KEY_READ, &hkey)) { + DWORD size = sizeof(fontFamily); + RegQueryValueExA(hkey, "FontFamily", NULL, NULL, + (LPVOID) &fontFamily, &size); + RegCloseKey(hkey); + } + } + + if (!(fontFamily & TMPF_TRUETYPE)) { + const wchar_t *msg = L"\nWarning: Your console font probably " + L"doesn\'t support Unicode. If you experience strange " + L"characters in the output, consider switching to a " + L"TrueType font such as Consolas!\n"; + DWORD dummy; + WriteConsoleW(console, msg, wcslen(msg), &dummy, NULL); + } +} + +static int is_console(int fd) +{ + CONSOLE_SCREEN_BUFFER_INFO sbi; + HANDLE hcon; + + static int initialized = 0; + + /* get OS handle of the file descriptor */ + hcon = (HANDLE) _get_osfhandle(fd); + if (hcon == INVALID_HANDLE_VALUE) + return 0; + + /* check if its a device (i.e. 
console, printer, serial port) */ + if (GetFileType(hcon) != FILE_TYPE_CHAR) + return 0; + + /* check if its a handle to a console output screen buffer */ + if (!GetConsoleScreenBufferInfo(hcon, &sbi)) + return 0; + + /* initialize attributes */ + if (!initialized) { + console = hcon; + attr = plain_attr = sbi.wAttributes; + negative = 0; + initialized = 1; + } + + return 1; +} + +#define BUFFER_SIZE 4096 +#define MAX_PARAMS 16 + +static void write_console(unsigned char *str, size_t len) +{ + /* only called from console_thread, so a static buffer will do */ + static wchar_t wbuf[2 * BUFFER_SIZE + 1]; + DWORD dummy; + + /* convert utf-8 to utf-16 */ + int wlen = xutftowcsn(wbuf, (char*) str, ARRAY_SIZE(wbuf), len); + + /* write directly to console */ + WriteConsoleW(console, wbuf, wlen, &dummy, NULL); + + /* remember if non-ascii characters are printed */ + if (wlen != len) + non_ascii_used = 1; +} + +#define FOREGROUND_ALL (FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE) +#define BACKGROUND_ALL (BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE) + +static void set_console_attr(void) +{ + WORD attributes = attr; + if (negative) { + attributes &= ~FOREGROUND_ALL; + attributes &= ~BACKGROUND_ALL; + + /* This could probably use a bitmask + instead of a series of ifs */ + if (attr & FOREGROUND_RED) + attributes |= BACKGROUND_RED; + if (attr & FOREGROUND_GREEN) + attributes |= BACKGROUND_GREEN; + if (attr & FOREGROUND_BLUE) + attributes |= BACKGROUND_BLUE; + + if (attr & BACKGROUND_RED) + attributes |= FOREGROUND_RED; + if (attr & BACKGROUND_GREEN) + attributes |= FOREGROUND_GREEN; + if (attr & BACKGROUND_BLUE) + attributes |= FOREGROUND_BLUE; + } + SetConsoleTextAttribute(console, attributes); +} + +static void erase_in_line(void) +{ + CONSOLE_SCREEN_BUFFER_INFO sbi; + DWORD dummy; /* Needed for Windows 7 (or Vista) regression */ + + if (!console) + return; + + GetConsoleScreenBufferInfo(console, &sbi); + FillConsoleOutputCharacterA(console, ' ', + sbi.dwSize.X - sbi.dwCursorPosition.X, sbi.dwCursorPosition, + &dummy); +} + +static void set_attr(char func, const int *params, int paramlen) +{ + int i; + switch (func) { + case 'm': + for (i = 0; i < paramlen; i++) { + switch (params[i]) { + case 0: /* reset */ + attr = plain_attr; + negative = 0; + break; + case 1: /* bold */ + attr |= FOREGROUND_INTENSITY; + break; + case 2: /* faint */ + case 22: /* normal */ + attr &= ~FOREGROUND_INTENSITY; + break; + case 3: /* italic */ + /* Unsupported */ + break; + case 4: /* underline */ + case 21: /* double underline */ + /* Wikipedia says this flag does nothing */ + /* Furthermore, mingw doesn't define this flag + attr |= COMMON_LVB_UNDERSCORE; */ + break; + case 24: /* no underline */ + /* attr &= ~COMMON_LVB_UNDERSCORE; */ + break; + case 5: /* slow blink */ + case 6: /* fast blink */ + /* We don't have blink, but we do have + background intensity */ + attr |= BACKGROUND_INTENSITY; + break; + case 25: /* no blink */ + attr &= ~BACKGROUND_INTENSITY; + break; + case 7: /* negative */ + negative = 1; + break; + case 27: /* positive */ + negative = 0; + break; + case 8: /* conceal */ + case 28: /* reveal */ + /* Unsupported */ + break; + case 30: /* Black */ + attr &= ~FOREGROUND_ALL; + break; + case 31: /* Red */ + attr &= ~FOREGROUND_ALL; + attr |= FOREGROUND_RED; + break; + case 32: /* Green */ + attr &= ~FOREGROUND_ALL; + attr |= FOREGROUND_GREEN; + break; + case 33: /* Yellow */ + attr &= ~FOREGROUND_ALL; + attr |= FOREGROUND_RED | FOREGROUND_GREEN; + break; + case 34: /* Blue */ + attr &= 
~FOREGROUND_ALL; + attr |= FOREGROUND_BLUE; + break; + case 35: /* Magenta */ + attr &= ~FOREGROUND_ALL; + attr |= FOREGROUND_RED | FOREGROUND_BLUE; + break; + case 36: /* Cyan */ + attr &= ~FOREGROUND_ALL; + attr |= FOREGROUND_GREEN | FOREGROUND_BLUE; + break; + case 37: /* White */ + attr |= FOREGROUND_RED | + FOREGROUND_GREEN | + FOREGROUND_BLUE; + break; + case 38: /* Unknown */ + break; + case 39: /* reset */ + attr &= ~FOREGROUND_ALL; + attr |= (plain_attr & FOREGROUND_ALL); + break; + case 40: /* Black */ + attr &= ~BACKGROUND_ALL; + break; + case 41: /* Red */ + attr &= ~BACKGROUND_ALL; + attr |= BACKGROUND_RED; + break; + case 42: /* Green */ + attr &= ~BACKGROUND_ALL; + attr |= BACKGROUND_GREEN; + break; + case 43: /* Yellow */ + attr &= ~BACKGROUND_ALL; + attr |= BACKGROUND_RED | BACKGROUND_GREEN; + break; + case 44: /* Blue */ + attr &= ~BACKGROUND_ALL; + attr |= BACKGROUND_BLUE; + break; + case 45: /* Magenta */ + attr &= ~BACKGROUND_ALL; + attr |= BACKGROUND_RED | BACKGROUND_BLUE; + break; + case 46: /* Cyan */ + attr &= ~BACKGROUND_ALL; + attr |= BACKGROUND_GREEN | BACKGROUND_BLUE; + break; + case 47: /* White */ + attr |= BACKGROUND_RED | + BACKGROUND_GREEN | + BACKGROUND_BLUE; + break; + case 48: /* Unknown */ + break; + case 49: /* reset */ + attr &= ~BACKGROUND_ALL; + attr |= (plain_attr & BACKGROUND_ALL); + break; + default: + /* Unsupported code */ + break; + } + } + set_console_attr(); + break; + case 'K': + erase_in_line(); + break; + default: + /* Unsupported code */ + break; + } +} + +enum { + TEXT = 0, ESCAPE = 033, BRACKET = '[' +}; + +static DWORD WINAPI console_thread(LPVOID unused) +{ + unsigned char buffer[BUFFER_SIZE]; + DWORD bytes; + int start, end = 0, c, parampos = 0, state = TEXT; + int params[MAX_PARAMS]; + + while (1) { + /* read next chunk of bytes from the pipe */ + if (!ReadFile(hread, buffer + end, BUFFER_SIZE - end, &bytes, + NULL)) { + /* exit if pipe has been closed or disconnected */ + if (GetLastError() == ERROR_PIPE_NOT_CONNECTED || + GetLastError() == ERROR_BROKEN_PIPE) + break; + /* ignore other errors */ + continue; + } + + /* scan the bytes and handle ANSI control codes */ + bytes += end; + start = end = 0; + while (end < bytes) { + c = buffer[end++]; + switch (state) { + case TEXT: + if (c == ESCAPE) { + /* print text seen so far */ + if (end - 1 > start) + write_console(buffer + start, + end - 1 - start); + + /* then start parsing escape sequence */ + start = end - 1; + memset(params, 0, sizeof(params)); + parampos = 0; + state = ESCAPE; + } + break; + + case ESCAPE: + /* continue if "\033[", otherwise bail out */ + state = (c == BRACKET) ? 
BRACKET : TEXT; + break; + + case BRACKET: + /* parse [0-9;]* into array of parameters */ + if (c >= '0' && c <= '9') { + params[parampos] *= 10; + params[parampos] += c - '0'; + } else if (c == ';') { + /* + * next parameter, bail out if out of + * bounds + */ + parampos++; + if (parampos >= MAX_PARAMS) + state = TEXT; + } else { + /* + * end of escape sequence, change + * console attributes + */ + set_attr(c, params, parampos + 1); + start = end; + state = TEXT; + } + break; + } + } + + /* print remaining text unless parsing an escape sequence */ + if (state == TEXT && end > start) { + /* check for incomplete UTF-8 sequences and fix end */ + if (buffer[end - 1] >= 0x80) { + if (buffer[end -1] >= 0xc0) + end--; + else if (end - 1 > start && + buffer[end - 2] >= 0xe0) + end -= 2; + else if (end - 2 > start && + buffer[end - 3] >= 0xf0) + end -= 3; + } + + /* print remaining complete UTF-8 sequences */ + if (end > start) + write_console(buffer + start, end - start); + + /* move remaining bytes to the front */ + if (end < bytes) + memmove(buffer, buffer + end, bytes - end); + end = bytes - end; + } else { + /* all data has been consumed, mark buffer empty */ + end = 0; + } + } + + /* check if the console font supports unicode */ + warn_if_raster_font(); + + CloseHandle(hread); + return 0; +} + +static void winansi_exit(void) +{ + /* flush all streams */ + _flushall(); + + /* signal console thread to exit */ + FlushFileBuffers(hwrite); + DisconnectNamedPipe(hwrite); + + /* wait for console thread to copy remaining data */ + WaitForSingleObject(hthread, INFINITE); + + /* cleanup handles... */ + CloseHandle(hwrite); + CloseHandle(hthread); +} + +static void die_lasterr(const char *fmt, ...) +{ + va_list params; + va_start(params, fmt); + errno = err_win_to_posix(GetLastError()); + die_errno(fmt, params); + va_end(params); +} + +static HANDLE duplicate_handle(HANDLE hnd) +{ + HANDLE hresult, hproc = GetCurrentProcess(); + if (!DuplicateHandle(hproc, hnd, hproc, &hresult, 0, TRUE, + DUPLICATE_SAME_ACCESS)) + die_lasterr("DuplicateHandle(%li) failed", (long) hnd); + return hresult; +} + + +/* + * Make MSVCRT's internal file descriptor control structure accessible + * so that we can tweak OS handles and flags directly (we need MSVCRT + * to treat our pipe handle as if it were a console). + * + * We assume that the ioinfo structure (exposed by MSVCRT.dll via + * __pioinfo) starts with the OS handle and the flags. The exact size + * varies between MSVCRT versions, so we try different sizes until + * toggling the FDEV bit of _pioinfo(1)->osflags is reflected in + * isatty(1). 
+ */ +typedef struct { + HANDLE osfhnd; + char osflags; +} ioinfo; + +extern __declspec(dllimport) ioinfo *__pioinfo[]; + +static size_t sizeof_ioinfo = 0; + +#define IOINFO_L2E 5 +#define IOINFO_ARRAY_ELTS (1 << IOINFO_L2E) + +#define FDEV 0x40 + +static inline ioinfo* _pioinfo(int fd) +{ + return (ioinfo*)((char*)__pioinfo[fd >> IOINFO_L2E] + + (fd & (IOINFO_ARRAY_ELTS - 1)) * sizeof_ioinfo); +} + +static int init_sizeof_ioinfo() +{ + int istty, wastty; + /* don't init twice */ + if (sizeof_ioinfo) + return sizeof_ioinfo >= 256; + + sizeof_ioinfo = sizeof(ioinfo); + wastty = isatty(1); + while (sizeof_ioinfo < 256) { + /* toggle FDEV flag, check isatty, then toggle back */ + _pioinfo(1)->osflags ^= FDEV; + istty = isatty(1); + _pioinfo(1)->osflags ^= FDEV; + /* return if we found the correct size */ + if (istty != wastty) + return 0; + sizeof_ioinfo += sizeof(void*); + } + error("Tweaking file descriptors doesn't work with this MSVCRT.dll"); + return 1; +} + +static HANDLE swap_osfhnd(int fd, HANDLE new_handle) +{ + ioinfo *pioinfo; + HANDLE old_handle; + + /* init ioinfo size if we haven't done so */ + if (init_sizeof_ioinfo()) + return INVALID_HANDLE_VALUE; + + /* get ioinfo pointer and change the handles */ + pioinfo = _pioinfo(fd); + old_handle = pioinfo->osfhnd; + pioinfo->osfhnd = new_handle; + return old_handle; +} + +void winansi_init(void) +{ + int con1, con2; + char name[32]; + + /* check if either stdout or stderr is a console output screen buffer */ + con1 = is_console(1); + con2 = is_console(2); + if (!con1 && !con2) + return; + + /* create a named pipe to communicate with the console thread */ + sprintf(name, "\\\\.\\pipe\\winansi%lu", GetCurrentProcessId()); + hwrite = CreateNamedPipe(name, PIPE_ACCESS_OUTBOUND, + PIPE_TYPE_BYTE | PIPE_WAIT, 1, BUFFER_SIZE, 0, 0, NULL); + if (hwrite == INVALID_HANDLE_VALUE) + die_lasterr("CreateNamedPipe failed"); + + hread = CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING, 0, NULL); + if (hread == INVALID_HANDLE_VALUE) + die_lasterr("CreateFile for named pipe failed"); + + /* start console spool thread on the pipe's read end */ + hthread = CreateThread(NULL, 0, console_thread, NULL, 0, NULL); + if (hthread == INVALID_HANDLE_VALUE) + die_lasterr("CreateThread(console_thread) failed"); + + /* schedule cleanup routine */ + if (atexit(winansi_exit)) + die_errno("atexit(winansi_exit) failed"); + + /* redirect stdout / stderr to the pipe */ + if (con1) + hconsole1 = swap_osfhnd(1, duplicate_handle(hwrite)); + if (con2) + hconsole2 = swap_osfhnd(2, duplicate_handle(hwrite)); +} + +/* + * Returns the real console handle if stdout / stderr is a pipe redirecting + * to the console. Allows spawn / exec to pass the console to the next process. + */ +HANDLE winansi_get_osfhandle(int fd) +{ + HANDLE hnd = (HANDLE) _get_osfhandle(fd); + if ((fd == 1 || fd == 2) && isatty(fd) + && GetFileType(hnd) == FILE_TYPE_PIPE) + return (fd == 1) ? hconsole1 : hconsole2; + return hnd; +}
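To round off winansi.c, here is a minimal sketch of how the layer is meant to be exercised. This is a hypothetical test program, not part of the diff; it assumes a Windows build of git where winansi_init() is declared in the compat headers. After the init call, the ordinary ANSI/SGR escapes below travel through the named pipe and are rendered by console_thread via SetConsoleTextAttribute():

	#include <stdio.h>

	void winansi_init(void);	/* assumed declared in git's Windows compat headers */

	int main(void)
	{
		winansi_init();	/* swaps stdout/stderr OS handles for the pipe */
		printf("\033[1;31mbold red\033[m, \033[32mgreen\033[m, plain\n");
		printf("progress 42%%\033[K\r");	/* 'K': erase to end of line */
		fflush(stdout);	/* bytes travel: fd 1 -> pipe -> console_thread */
		return 0;
	}

Only the 'm' (attributes) and 'K' (erase-in-line) codes are interpreted, matching the header comment's claim that these are the only ANSI codes git emits; anything else is parsed and dropped by set_attr()'s default case.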