Diffstat (limited to 'storage')
-rw-r--r--  storage/connect/global.h  |   56
-rw-r--r--  storage/connect/ha_connect.cc  |  256
-rw-r--r--  storage/connect/ha_connect.h  |    4
-rw-r--r--  storage/connect/inihandl.cpp  |    2
-rw-r--r--  storage/connect/json.cpp  |  164
-rw-r--r--  storage/connect/json.h  |   57
-rw-r--r--  storage/connect/jsonudf.cpp  |  546
-rw-r--r--  storage/connect/jsonudf.h  |   39
-rw-r--r--  storage/connect/mongo.cpp  |   10
-rw-r--r--  storage/connect/mysql-test/connect/r/json_java_2.result  |   36
-rw-r--r--  storage/connect/mysql-test/connect/r/json_java_3.result  |   36
-rw-r--r--  storage/connect/mysql-test/connect/r/json_mongo_c.result  |   36
-rw-r--r--  storage/connect/mysql-test/connect/r/updelx.result  |    4
-rw-r--r--  storage/connect/mysql-test/connect/t/updelx.test  |    4
-rw-r--r--  storage/connect/myutil.h  |    4
-rw-r--r--  storage/connect/plgdbutl.cpp  |   20
-rw-r--r--  storage/connect/plugutil.cpp  |   67
-rw-r--r--  storage/connect/tabjson.cpp  |  110
-rw-r--r--  storage/connect/tabjson.h  |    4
-rw-r--r--  storage/connect/tabrest.cpp  |   28
-rw-r--r--  storage/connect/tabxml.cpp  |    9
-rw-r--r--  storage/connect/user_connect.cc  |   20
-rw-r--r--  storage/connect/value.cpp  |    9
-rw-r--r--  storage/connect/value.h  |    7
-rw-r--r--  storage/heap/ha_heap.cc  |   27
-rw-r--r--  storage/innobase/CMakeLists.txt  |    1
-rw-r--r--  storage/innobase/btr/btr0btr.cc  |    4
-rw-r--r--  storage/innobase/btr/btr0bulk.cc  |    4
-rw-r--r--  storage/innobase/btr/btr0cur.cc  |   89
-rw-r--r--  storage/innobase/btr/btr0pcur.cc  |   39
-rw-r--r--  storage/innobase/buf/buf0block_hint.cc  |   78
-rw-r--r--  storage/innobase/buf/buf0buf.cc  |   76
-rw-r--r--  storage/innobase/dict/dict0dict.cc  |    6
-rw-r--r--  storage/innobase/dict/dict0mem.cc  |   28
-rw-r--r--  storage/innobase/dict/dict0stats.cc  |  207
-rw-r--r--  storage/innobase/fil/fil0crypt.cc  |    2
-rw-r--r--  storage/innobase/fts/fts0ast.cc  |    8
-rw-r--r--  storage/innobase/fts/fts0que.cc  |   31
-rw-r--r--  storage/innobase/gis/gis0sea.cc  |   25
-rw-r--r--  storage/innobase/handler/ha_innodb.cc  |   88
-rw-r--r--  storage/innobase/handler/i_s.cc  |   47
-rw-r--r--  storage/innobase/include/btr0btr.h  |    4
-rw-r--r--  storage/innobase/include/btr0cur.h  |   31
-rw-r--r--  storage/innobase/include/btr0pcur.h  |   11
-rw-r--r--  storage/innobase/include/btr0sea.h  |    2
-rw-r--r--  storage/innobase/include/buf0block_hint.h  |   77
-rw-r--r--  storage/innobase/include/buf0buf.h  |   18
-rw-r--r--  storage/innobase/include/buf0buf.ic  |   37
-rw-r--r--  storage/innobase/include/dict0dict.h  |    4
-rw-r--r--  storage/innobase/include/dict0dict.ic  |    2
-rw-r--r--  storage/innobase/include/dict0mem.h  |   43
-rw-r--r--  storage/innobase/include/dict0stats.ic  |    7
-rw-r--r--  storage/innobase/include/mtr0mtr.ic  |    4
-rw-r--r--  storage/innobase/include/sync0sync.h  |    1
-rw-r--r--  storage/innobase/include/trx0sys.h  |    2
-rw-r--r--  storage/innobase/include/trx0undo.h  |    4
-rw-r--r--  storage/innobase/mtr/mtr0mtr.cc  |    4
-rw-r--r--  storage/innobase/row/row0mysql.cc  |    7
-rw-r--r--  storage/innobase/row/row0uins.cc  |    2
-rw-r--r--  storage/innobase/sync/sync0debug.cc  |    3
-rw-r--r--  storage/innobase/sync/sync0sync.cc  |    1
-rw-r--r--  storage/innobase/trx/trx0rec.cc  |    2
-rw-r--r--  storage/innobase/trx/trx0undo.cc  |    9
-rw-r--r--  storage/maria/ha_maria.cc  |    3
-rw-r--r--  storage/mroonga/vendor/groonga/CMakeLists.txt  |    1
-rw-r--r--  storage/mroonga/vendor/groonga/lib/CMakeLists.txt  |    2
-rw-r--r--  storage/rocksdb/build_rocksdb.cmake  |    2
67 files changed, 1662 insertions, 909 deletions
diff --git a/storage/connect/global.h b/storage/connect/global.h
index fd26c87b800..d17620861fa 100644
--- a/storage/connect/global.h
+++ b/storage/connect/global.h
@@ -1,7 +1,7 @@
/***********************************************************************/
/* GLOBAL.H: Declaration file used by all CONNECT implementations. */
/* (C) Copyright MariaDB Corporation Ab */
-/* Author Olivier Bertrand 1993-2018 */
+/* Author Olivier Bertrand 1993-2020 */
/***********************************************************************/
/***********************************************************************/
@@ -89,14 +89,10 @@ extern "C" {
#define PAT_LOG "log"
#if defined(UNIX) || defined(LINUX) || defined(UNIV_LINUX)
- /*********************************************************************/
- /* printf does not accept null pointer for %s target. */
- /*********************************************************************/
+ // printf does not accept null pointer for %s target
#define SVP(S) ((S) ? S : "<null>")
#else
- /*********************************************************************/
- /* printf accepts null pointer for %s target. */
- /*********************************************************************/
+ // printf accepts null pointer for %s target
#define SVP(S) S
#endif
@@ -112,9 +108,6 @@ extern "C" {
/***********************************************************************/
#include "os.h"
-typedef uint OFFSET;
-typedef char NAME[9];
-
typedef struct {
ushort Length;
char String[2];
@@ -127,6 +120,7 @@ typedef struct _global *PGLOBAL;
typedef struct _globplg *PGS;
typedef struct _activity *PACTIVITY;
typedef struct _parm *PPARM;
+typedef char NAME[9];
/***********************************************************************/
/* Segment Sub-Allocation block structure declares. */
@@ -135,8 +129,8 @@ typedef struct _parm *PPARM;
/* restore them if needed. This scheme implies that no SubFree be used */
/***********************************************************************/
typedef struct { /* Plug Area SubAlloc header */
- OFFSET To_Free; /* Offset of next free block */
- uint FreeBlk; /* Size of remaining free memory */
+ size_t To_Free; /* Offset of next free block */
+ size_t FreeBlk; /* Size of remaining free memory */
} POOLHEADER, *PPOOLHEADER;
/***********************************************************************/
@@ -188,11 +182,12 @@ typedef struct _parm {
/***********************************************************************/
typedef struct _global { /* Global structure */
void *Sarea; /* Points to work area */
- uint Sarea_Size; /* Work area size */
+ size_t Sarea_Size; /* Work area size */
PACTIVITY Activityp;
- char Message[MAX_STR];
+ char Message[MAX_STR]; /* Message (result, error, trace) */
ulong More; /* Used by jsonudf */
- int Createas; /* To pass multi to ext tables */
+ size_t Saved_Size; /* Saved work area to_free */
+ bool Createas; /* To pass multi to ext tables */
void *Xchk; /* indexes in create/alter */
short Alchecked; /* Checked for ALTER */
short Mrr; /* True when doing mrr */
@@ -210,19 +205,18 @@ DllExport char *PlugReadMessage(PGLOBAL, int, char *);
DllExport char *PlugGetMessage(PGLOBAL, int);
#endif // XMSG || NEWMSG
#if defined(__WIN__)
-DllExport short GetLineLength(PGLOBAL); // Console line length
+DllExport short GetLineLength(PGLOBAL); // Console line length
#endif // __WIN__
-DllExport PGLOBAL PlugInit(LPCSTR, uint); // Plug global initialization
-DllExport int PlugExit(PGLOBAL); // Plug global termination
+DllExport PGLOBAL PlugInit(LPCSTR, size_t); // Plug global initialization
+DllExport int PlugExit(PGLOBAL); // Plug global termination
DllExport LPSTR PlugRemoveType(LPSTR, LPCSTR);
DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR prefix, LPCSTR name, LPCSTR dir);
DllExport BOOL PlugIsAbsolutePath(LPCSTR path);
-DllExport bool AllocSarea(PGLOBAL, uint);
+DllExport bool AllocSarea(PGLOBAL, size_t);
DllExport void FreeSarea(PGLOBAL);
-DllExport BOOL PlugSubSet(void *, uint);
+DllExport BOOL PlugSubSet(void *, size_t);
DllExport void *PlugSubAlloc(PGLOBAL, void *, size_t);
DllExport char *PlugDup(PGLOBAL g, const char *str);
-DllExport void *MakePtr(void *, OFFSET);
DllExport void htrc(char const *fmt, ...);
DllExport void xtrc(uint, char const* fmt, ...);
DllExport uint GetTraceValue(void);
@@ -232,8 +226,24 @@ DllExport uint GetTraceValue(void);
#endif
/***********************************************************************/
-/* Non exported routine declarations. */
+/* Inline routine definitions. */
+/***********************************************************************/
+/***********************************************************************/
+/* This routine makes a pointer from an offset to a memory pointer. */
+/***********************************************************************/
+inline void* MakePtr(void* memp, size_t offset) {
+ // return ((offset == 0) ? NULL : &((char*)memp)[offset]);
+ return (!offset) ? NULL : (char *)memp + offset;
+} /* end of MakePtr */
+
+/***********************************************************************/
+/* This routine makes an offset from a pointer new format. */
/***********************************************************************/
-//void *PlugSubAlloc(PGLOBAL, void *, size_t); // Does throw
+inline size_t MakeOff(void* memp, void* ptr) {
+#if defined(_DEBUG)
+ assert(ptr > memp);
+#endif // _DEBUG
+ return ((!ptr) ? 0 : (size_t)((char*)ptr - (size_t)memp));
+} /* end of MakeOff */
/*-------------------------- End of Global.H --------------------------*/
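The global.h change above retires the 32-bit OFFSET type: work-area offsets become size_t and MakePtr/MakeOff are now inline helpers, so sub-allocated areas are no longer limited to 4 GB. A minimal sketch of the round trip the pair is meant to satisfy, using an ordinary stack buffer as a stand-in for g->Sarea (illustrative only, not the CONNECT allocator):

  #include <cassert>
  #include <cstddef>

  // Simplified equivalents of the inline helpers added to global.h.
  inline void* MakePtr(void* memp, size_t offset) {
    return offset ? (char*)memp + offset : nullptr;   // offset 0 means "no pointer"
  }

  inline size_t MakeOff(void* memp, void* ptr) {
    return ptr ? (size_t)((char*)ptr - (char*)memp) : 0;
  }

  int main() {
    char area[1024];                      // stand-in for the work area
    void* p = MakePtr(area, 64);          // offset -> pointer
    assert(MakeOff(area, p) == 64);       // pointer -> offset round trip
    assert(MakePtr(area, 0) == nullptr);  // offset 0 maps back to null
    return 0;
  }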
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index a5c90e50293..00a0f4f6270 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -170,9 +170,9 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
- char version[]= "Version 1.07.0001 November 12, 2019";
+ char version[]= "Version 1.07.0002 October 18, 2020";
#if defined(__WIN__)
- char compver[]= "Version 1.07.0001 " __DATE__ " " __TIME__;
+ char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__;
char slash= '\\';
#else // !__WIN__
char slash= '/';
@@ -251,11 +251,13 @@ bool ExactInfo(void);
USETEMP UseTemp(void);
int GetConvSize(void);
TYPCONV GetTypeConv(void);
+bool JsonAllPath(void);
char *GetJsonNull(void);
+int GetDefaultDepth(void);
uint GetJsonGrpSize(void);
char *GetJavaWrapper(void);
-uint GetWorkSize(void);
-void SetWorkSize(uint);
+size_t GetWorkSize(void);
+void SetWorkSize(size_t);
extern "C" const char *msglang(void);
static void PopUser(PCONNECT xp);
@@ -345,11 +347,19 @@ static MYSQL_THDVAR_ENUM(
1, // def (AUTO)
&usetemp_typelib); // typelib
+#ifdef _WIN64
// Size used for g->Sarea_Size
-static MYSQL_THDVAR_UINT(work_size,
- PLUGIN_VAR_RQCMDARG,
- "Size of the CONNECT work area.",
- NULL, NULL, SZWORK, SZWMIN, UINT_MAX, 1);
+static MYSQL_THDVAR_ULONGLONG(work_size,
+ PLUGIN_VAR_RQCMDARG,
+ "Size of the CONNECT work area.",
+ NULL, NULL, SZWORK, SZWMIN, ULONGLONG_MAX, 1);
+#else
+// Size used for g->Sarea_Size
+static MYSQL_THDVAR_ULONG(work_size,
+ PLUGIN_VAR_RQCMDARG,
+ "Size of the CONNECT work area.",
+ NULL, NULL, SZWORK, SZWMIN, ULONG_MAX, 1);
+#endif
// Size used when converting TEXT columns to VARCHAR
static MYSQL_THDVAR_INT(conv_size,
@@ -384,6 +394,11 @@ static MYSQL_THDVAR_ENUM(
1, // def (yes)
&xconv_typelib); // typelib
+// Adding JPATH to all Json table columns
+static MYSQL_THDVAR_BOOL(json_all_path, PLUGIN_VAR_RQCMDARG,
+ "Adding JPATH to all Json table columns",
+ NULL, NULL, 0); // NO by default
+
// Null representation for JSON values
static MYSQL_THDVAR_STR(json_null,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC,
@@ -391,6 +406,12 @@ static MYSQL_THDVAR_STR(json_null,
// check_json_null, update_json_null,
NULL, NULL, "<null>");
+// Default Json, XML or Mongo depth
+static MYSQL_THDVAR_INT(default_depth,
+ PLUGIN_VAR_RQCMDARG,
+ "Default depth used by Json, XML and Mongo discovery",
+ NULL, NULL, 0, -1, 16, 1);
+
// Estimate max number of rows for JSON aggregate functions
static MYSQL_THDVAR_UINT(json_grp_size,
PLUGIN_VAR_RQCMDARG, // opt
@@ -452,15 +473,17 @@ uint GetTraceValue(void)
{return (uint)(connect_hton ? THDVAR(current_thd, xtrace) : 0);}
bool ExactInfo(void) {return THDVAR(current_thd, exact_info);}
static bool CondPushEnabled(void) {return THDVAR(current_thd, cond_push);}
+bool JsonAllPath(void) {return THDVAR(current_thd, json_all_path);}
USETEMP UseTemp(void) {return (USETEMP)THDVAR(current_thd, use_tempfile);}
int GetConvSize(void) {return THDVAR(current_thd, conv_size);}
TYPCONV GetTypeConv(void) {return (TYPCONV)THDVAR(current_thd, type_conv);}
char *GetJsonNull(void)
{return connect_hton ? THDVAR(current_thd, json_null) : NULL;}
+int GetDefaultDepth(void) {return THDVAR(current_thd, default_depth);}
uint GetJsonGrpSize(void)
{return connect_hton ? THDVAR(current_thd, json_grp_size) : 10;}
-uint GetWorkSize(void) {return THDVAR(current_thd, work_size);}
-void SetWorkSize(uint)
+size_t GetWorkSize(void) {return (size_t)THDVAR(current_thd, work_size);}
+void SetWorkSize(size_t)
{
// Changing the session variable value seems to be impossible here
// and should be done in a check function
@@ -470,7 +493,8 @@ void SetWorkSize(uint)
#if defined(JAVA_SUPPORT)
char *GetJavaWrapper(void)
-{return connect_hton ? THDVAR(current_thd, java_wrapper) : (char*)"wrappers/JdbcInterface";}
+{return connect_hton ? THDVAR(current_thd, java_wrapper)
+ : (char*)"wrappers/JdbcInterface";}
#endif // JAVA_SUPPORT
#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
@@ -619,8 +643,10 @@ ha_create_table_option connect_field_option_list[]=
HA_FOPTION_NUMBER("FIELD_LENGTH", fldlen, 0, 0, INT_MAX32, 1),
HA_FOPTION_STRING("DATE_FORMAT", dateformat),
HA_FOPTION_STRING("FIELD_FORMAT", fieldformat),
- HA_FOPTION_STRING("SPECIAL", special),
- HA_FOPTION_ENUM("DISTRIB", opt, "scattered,clustered,sorted", 0),
+ HA_FOPTION_STRING("JPATH", jsonpath),
+ HA_FOPTION_STRING("XPATH", xmlpath),
+ HA_FOPTION_STRING("SPECIAL", special),
+ HA_FOPTION_ENUM("DISTRIB", opt, "scattered,clustered,sorted", 0),
HA_FOPTION_END
};
@@ -1311,9 +1337,10 @@ int GetIntegerTableOption(PGLOBAL g, PTOS options, PCSZ opname, int idef)
if ((ulonglong) opval == (ulonglong)NO_IVAL) {
PCSZ pv;
- if ((pv= GetListOption(g, opname, options->oplist)))
- opval= CharToNumber((char*)pv, strlen(pv), ULONGLONG_MAX, true);
- else
+ if ((pv = GetListOption(g, opname, options->oplist))) {
+ // opval = CharToNumber((char*)pv, strlen(pv), ULONGLONG_MAX, false);
+ return atoi(pv);
+ } else
return idef;
} // endif opval
@@ -1565,8 +1592,9 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Offset= (int)fop->offset;
pcf->Freq= (int)fop->freq;
pcf->Datefmt= (char*)fop->dateformat;
- pcf->Fieldfmt= (char*)fop->fieldformat;
- } else {
+ pcf->Fieldfmt = fop->fieldformat ? (char*)fop->fieldformat
+ : fop->jsonpath ? (char*)fop->jsonpath : (char*)fop->xmlpath;
+ } else {
pcf->Offset= -1;
pcf->Freq= 0;
pcf->Datefmt= NULL;
@@ -1575,6 +1603,9 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
chset= (char *)fp->charset()->name;
+ if (!strcmp(chset, "binary"))
+ v = 'B'; // Binary string
+
switch (fp->type()) {
case MYSQL_TYPE_BLOB:
case MYSQL_TYPE_VARCHAR:
@@ -1584,7 +1615,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
default:
pcf->Type= MYSQLtoPLG(fp->type(), &v);
break;
- } // endswitch SQL type
+ } // endswitch SQL type
switch (pcf->Type) {
case TYPE_STRING:
@@ -1638,7 +1669,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
break;
default:
break;
- } // endswitch type
+ } // endswitch type
if (fp->flags & UNSIGNED_FLAG)
pcf->Flags |= U_UNSIGNED;
@@ -2209,7 +2240,7 @@ int ha_connect::MakeRecord(char *buf)
case TYPE_BIN:
p= value->GetCharValue();
charset= &my_charset_bin;
- rc= fp->store(p, strlen(p), charset, CHECK_FIELD_WARN);
+ rc= fp->store(p, value->GetSize(), charset, CHECK_FIELD_WARN);
break;
case TYPE_DOUBLE:
rc= fp->store(value->GetFloatValue());
@@ -4969,7 +5000,7 @@ int ha_connect::check_stmt(PGLOBAL g, MODE newmode, bool cras)
} // endif CheckCleanup
if (cras)
- g->Createas= 1; // To tell external tables of a multi-table command
+ g->Createas= true; // To tell external tables of a multi-table command
if (trace(1))
htrc("Calling CntCheckDB db=%s cras=%d\n", GetDBName(NULL), cras);
@@ -5319,91 +5350,100 @@ static char *encode(PGLOBAL g, const char *cnm)
@return
Return 0 if ok
*/
-static bool add_field(String *sql, const char *field_name, int typ, int len,
- int dec, char *key, uint tm, const char *rem, char *dft,
- char *xtra, char *fmt, int flag, bool dbf, char v)
-{
- char var= (len > 255) ? 'V' : v;
- bool q, error= false;
- const char *type= PLGtoMYSQLtype(typ, dbf, var);
+static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ,
+ int len, int dec, char* key, uint tm, const char* rem,
+ char* dft, char* xtra, char* fmt, int flag, bool dbf, char v) {
+#if defined(DEVELOPMENT)
+ // Some client programs regard CHAR(36) as GUID
+ char var = (len > 255 || len == 36) ? 'V' : v;
+#else
+ char var = (len > 255) ? 'V' : v;
+#endif
+ bool q, error = false;
+ const char* type = PLGtoMYSQLtype(typ, dbf, var);
- error|= sql->append('`');
- error|= sql->append(field_name);
- error|= sql->append("` ");
- error|= sql->append(type);
+ error |= sql->append('`');
+ error |= sql->append(field_name);
+ error |= sql->append("` ");
+ error |= sql->append(type);
- if (typ == TYPE_STRING ||
- (len && typ != TYPE_DATE && (typ != TYPE_DOUBLE || dec >= 0))) {
- error|= sql->append('(');
- error|= sql->append_ulonglong(len);
+ if (typ == TYPE_STRING ||
+ (len && typ != TYPE_DATE && (typ != TYPE_DOUBLE || dec >= 0))) {
+ error |= sql->append('(');
+ error |= sql->append_ulonglong(len);
if (typ == TYPE_DOUBLE) {
- error|= sql->append(',');
- // dec must be < len and < 31
- error|= sql->append_ulonglong(MY_MIN(dec, (MY_MIN(len, 31) - 1)));
- } else if (dec > 0 && !strcmp(type, "DECIMAL")) {
- error|= sql->append(',');
- // dec must be < len
- error|= sql->append_ulonglong(MY_MIN(dec, len - 1));
- } // endif dec
-
- error|= sql->append(')');
- } // endif len
-
- if (v == 'U')
- error|= sql->append(" UNSIGNED");
- else if (v == 'Z')
- error|= sql->append(" ZEROFILL");
-
- if (key && *key) {
- error|= sql->append(" ");
- error|= sql->append(key);
- } // endif key
-
- if (tm)
- error|= sql->append(STRING_WITH_LEN(" NOT NULL"), system_charset_info);
-
- if (dft && *dft) {
- error|= sql->append(" DEFAULT ");
-
- if (typ == TYPE_DATE)
- q= (strspn(dft, "0123456789 -:/") == strlen(dft));
- else
- q= !IsTypeNum(typ);
+ error |= sql->append(',');
+ // dec must be < len and < 31
+ error |= sql->append_ulonglong(MY_MIN(dec, (MY_MIN(len, 31) - 1)));
+ } else if (dec > 0 && !strcmp(type, "DECIMAL")) {
+ error |= sql->append(',');
+ // dec must be < len
+ error |= sql->append_ulonglong(MY_MIN(dec, len - 1));
+ } // endif dec
+
+ error |= sql->append(')');
+ } // endif len
+
+ if (v == 'U')
+ error |= sql->append(" UNSIGNED");
+ else if (v == 'Z')
+ error |= sql->append(" ZEROFILL");
+
+ if (key && *key) {
+ error |= sql->append(" ");
+ error |= sql->append(key);
+ } // endif key
+
+ if (tm)
+ error |= sql->append(STRING_WITH_LEN(" NOT NULL"), system_charset_info);
+
+ if (dft && *dft) {
+ error |= sql->append(" DEFAULT ");
+
+ if (typ == TYPE_DATE)
+ q = (strspn(dft, "0123456789 -:/") == strlen(dft));
+ else
+ q = !IsTypeNum(typ);
- if (q) {
- error|= sql->append("'");
- error|= sql->append_for_single_quote(dft, strlen(dft));
- error|= sql->append("'");
- } else
- error|= sql->append(dft);
-
- } // endif dft
-
- if (xtra && *xtra) {
- error|= sql->append(" ");
- error|= sql->append(xtra);
- } // endif rem
-
- if (rem && *rem) {
- error|= sql->append(" COMMENT '");
- error|= sql->append_for_single_quote(rem, strlen(rem));
- error|= sql->append("'");
- } // endif rem
-
- if (fmt && *fmt) {
- error|= sql->append(" FIELD_FORMAT='");
- error|= sql->append_for_single_quote(fmt, strlen(fmt));
- error|= sql->append("'");
- } // endif flag
-
- if (flag) {
- error|= sql->append(" FLAG=");
- error|= sql->append_ulonglong(flag);
- } // endif flag
-
- error|= sql->append(',');
- return error;
+ if (q) {
+ error |= sql->append("'");
+ error |= sql->append_for_single_quote(dft, strlen(dft));
+ error |= sql->append("'");
+ } else
+ error |= sql->append(dft);
+
+ } // endif dft
+
+ if (xtra && *xtra) {
+ error |= sql->append(" ");
+ error |= sql->append(xtra);
+ } // endif rem
+
+ if (rem && *rem) {
+ error |= sql->append(" COMMENT '");
+ error |= sql->append_for_single_quote(rem, strlen(rem));
+ error |= sql->append("'");
+ } // endif rem
+
+ if (fmt && *fmt) {
+ switch (ttp) {
+ case TAB_JSON: error |= sql->append(" JPATH='"); break;
+ case TAB_XML: error |= sql->append(" XPATH='"); break;
+ default: error |= sql->append(" FIELD_FORMAT='");
+ } // endswitch ttp
+
+ error |= sql->append_for_single_quote(fmt, strlen(fmt));
+ error |= sql->append("'");
+ } // endif flag
+
+ if (flag) {
+ error |= sql->append(" FLAG=");
+ error |= sql->append_ulonglong(flag);
+ } // endif flag
+
+ error |= sql->append(',');
+ return error;
} // end of add_field
/**
@@ -6024,7 +6064,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
len= 256; // STRBLK's have 0 length
// Now add the field
- if (add_field(&sql, cnm, typ, len, dec, NULL, tm,
+ if (add_field(&sql, ttp, cnm, typ, len, dec, NULL, tm,
NULL, NULL, NULL, NULL, flg, dbf, v))
rc= HA_ERR_OUT_OF_MEM;
} // endfor crp
@@ -6218,7 +6258,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
prec= 0;
// Now add the field
- if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra,
+ if (add_field(&sql, ttp, cnm, typ, prec, dec, key, tm, rem, dft, xtra,
fmt, flg, dbf, v))
rc= HA_ERR_OUT_OF_MEM;
} // endfor i
@@ -6971,7 +7011,7 @@ bool ha_connect::NoFieldOptionChange(TABLE *tab)
fop1->fldlen == fop2->fldlen &&
CheckString(fop1->dateformat, fop2->dateformat) &&
CheckString(fop1->fieldformat, fop2->fieldformat) &&
- CheckString(fop1->special, fop2->special));
+ CheckString(fop1->special, fop2->special));
} // endfor fld
return rc;
@@ -7343,7 +7383,9 @@ static struct st_mysql_sys_var* connect_system_variables[]= {
MYSQL_SYSVAR(errmsg_dir_path),
#endif // XMSG
MYSQL_SYSVAR(json_null),
- MYSQL_SYSVAR(json_grp_size),
+ MYSQL_SYSVAR(json_all_path),
+ MYSQL_SYSVAR(default_depth),
+ MYSQL_SYSVAR(json_grp_size),
#if defined(JAVA_SUPPORT)
MYSQL_SYSVAR(jvm_path),
MYSQL_SYSVAR(class_path),
@@ -7369,7 +7411,7 @@ maria_declare_plugin(connect)
0x0107, /* version number (1.07) */
NULL, /* status variables */
connect_system_variables, /* system variables */
- "1.07.0001", /* string version */
+ "1.07.0002", /* string version */
MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
}
maria_declare_plugin_end;
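Two details of the ha_connect.cc change above are easy to miss: discovery (add_field) now emits JPATH= or XPATH= instead of FIELD_FORMAT= for JSON and XML tables, and GetColumnOption resolves the column format string with FIELD_FORMAT taking precedence over JPATH, which in turn takes precedence over XPATH. A minimal sketch of that precedence, using a hypothetical plain struct in place of ha_field_option_struct:

  #include <cstdio>

  // Hypothetical stand-in for the relevant ha_field_option_struct members.
  struct FieldOpt {
    const char* fieldformat;
    const char* jsonpath;
    const char* xmlpath;
  };

  // FIELD_FORMAT wins, then JPATH, then XPATH (result may be null).
  static const char* ResolveFormat(const FieldOpt& fop) {
    return fop.fieldformat ? fop.fieldformat
         : fop.jsonpath    ? fop.jsonpath
                           : fop.xmlpath;
  }

  int main() {
    FieldOpt f1 = {nullptr, "address.street", nullptr};
    FieldOpt f2 = {"%d-%m-%Y", "ignored", nullptr};
    printf("%s\n", ResolveFormat(f1));   // address.street (JPATH)
    printf("%s\n", ResolveFormat(f2));   // %d-%m-%Y (FIELD_FORMAT wins)
    return 0;
  }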
diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h
index 06de375ef58..218819d0b73 100644
--- a/storage/connect/ha_connect.h
+++ b/storage/connect/ha_connect.h
@@ -104,7 +104,9 @@ struct ha_field_option_struct
uint opt;
const char *dateformat;
const char *fieldformat;
- char *special;
+ const char* jsonpath;
+ const char* xmlpath;
+ char *special;
};
/*
diff --git a/storage/connect/inihandl.cpp b/storage/connect/inihandl.cpp
index 95cb3a1227d..9270e18721c 100644
--- a/storage/connect/inihandl.cpp
+++ b/storage/connect/inihandl.cpp
@@ -194,7 +194,7 @@ static void PROFILE_Save( FILE *file, PROFILESECTION *section )
}
for (key = section->key; key; key = key->next)
- if (key->name && key->name[0]) {
+ if (key->name[0]) {
fprintf(file, "%s", SVP(key->name));
if (key->value)
diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp
index 5d7d08285cf..f6dca8146d6 100644
--- a/storage/connect/json.cpp
+++ b/storage/connect/json.cpp
@@ -93,9 +93,8 @@ char *NextChr(PSZ s, char sep)
PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
{
int i, pretty = (ptyp) ? *ptyp : 3;
- bool b = false, pty[3] = {true, true, true};
- PJSON jsp = NULL;
- STRG src;
+ bool b = false, pty[3] = {true,true,true};
+ PJSON jsp = NULL, jp = NULL;
if (trace(1))
htrc("ParseJson: s=%.10s len=%d\n", s, len);
@@ -106,27 +105,29 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
} else if (comma)
*comma = false;
- src.str = s;
- src.len = len;
-
// Trying to guess the pretty format
if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
pty[0] = false;
try {
- for (i = 0; i < len; i++)
+ jp = new(g) JSON();
+ jp->s = s;
+ jp->len = len;
+ jp->pty = pty;
+
+ for (i = 0; i < jp->len; i++)
switch (s[i]) {
case '[':
if (jsp)
- goto tryit;
- else if (!(jsp = ParseArray(g, ++i, src, pty)))
- throw 1;
+ jsp = jp->ParseAsArray(g, i, pretty, ptyp);
+ else
+ jsp = jp->ParseArray(g, ++i);
break;
case '{':
if (jsp)
- goto tryit;
- else if (!(jsp = ParseObject(g, ++i, src, pty)))
+ jsp = jp->ParseAsArray(g, i, pretty, ptyp);
+ else if (!(jsp = jp->ParseObject(g, ++i)))
throw 2;
break;
@@ -157,8 +158,8 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
/* falls through */
default:
if (jsp)
- goto tryit;
- else if (!(jsp = ParseValue(g, i, src, pty)))
+ jsp = jp->ParseAsArray(g, i, pretty, ptyp);
+ else if (!(jsp = jp->ParseValue(g, i)))
throw 4;
break;
@@ -187,10 +188,17 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
} // end catch
return jsp;
+} // end of ParseJson
-tryit:
+/***********************************************************************/
+/* Parse several items as being in an array. */
+/***********************************************************************/
+PJAR JSON::ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp)
+{
if (pty[0] && (!pretty || pretty > 2)) {
- if ((jsp = ParseArray(g, (i = 0), src, pty)) && ptyp && pretty == 3)
+ PJAR jsp;
+
+ if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3)
*ptyp = (pty[0]) ? 0 : 3;
return jsp;
@@ -198,26 +206,23 @@ tryit:
strcpy(g->Message, "More than one item in file");
return NULL;
-} // end of ParseJson
+} // end of ParseAsArray
/***********************************************************************/
/* Parse a JSON Array. */
/***********************************************************************/
-PJAR ParseArray(PGLOBAL g, int& i, STRG& src, bool *pty)
+PJAR JSON::ParseArray(PGLOBAL g, int& i)
{
- char *s = src.str;
- int len = src.len;
- int level = 0;
- bool b = (!i);
- PJAR jarp = new(g) JARRAY;
- PJVAL jvp = NULL;
+ int level = 0;
+ bool b = (!i);
+ PJAR jarp = new(g) JARRAY;
for (; i < len; i++)
switch (s[i]) {
case ',':
if (level < 2) {
sprintf(g->Message, "Unexpected ',' near %.*s",ARGS);
- return NULL;
+ throw 1;
} else
level = 1;
@@ -225,8 +230,8 @@ PJAR ParseArray(PGLOBAL g, int& i, STRG& src, bool *pty)
case ']':
if (level == 1) {
sprintf(g->Message, "Unexpected ',]' near %.*s", ARGS);
- return NULL;
- } // endif level
+ throw 1;
+ } // endif level
jarp->InitArray(g);
return jarp;
@@ -240,11 +245,9 @@ PJAR ParseArray(PGLOBAL g, int& i, STRG& src, bool *pty)
default:
if (level == 2) {
sprintf(g->Message, "Unexpected value near %.*s", ARGS);
- return NULL;
- } else if ((jvp = ParseValue(g, i, src, pty)))
- jarp->AddValue(g, jvp);
- else
- return NULL;
+ throw 1;
+ } else
+ jarp->AddValue(g, ParseValue(g, i));
level = (b) ? 1 : 2;
break;
@@ -256,18 +259,15 @@ PJAR ParseArray(PGLOBAL g, int& i, STRG& src, bool *pty)
return jarp;
} // endif b
- strcpy(g->Message, "Unexpected EOF in array");
- return NULL;
+ throw ("Unexpected EOF in array");
} // end of ParseArray
/***********************************************************************/
/* Parse a JSON Object. */
/***********************************************************************/
-PJOB ParseObject(PGLOBAL g, int& i, STRG& src, bool *pty)
+PJOB JSON::ParseObject(PGLOBAL g, int& i)
{
PSZ key;
- char *s = src.str;
- int len = src.len;
int level = 0;
PJOB jobp = new(g) JOBJECT;
PJPR jpp = NULL;
@@ -276,42 +276,37 @@ PJOB ParseObject(PGLOBAL g, int& i, STRG& src, bool *pty)
switch (s[i]) {
case '"':
if (level < 2) {
- if ((key = ParseString(g, ++i, src))) {
- jpp = jobp->AddPair(g, key);
- level = 1;
- } else
- return NULL;
-
+ key = ParseString(g, ++i);
+ jpp = jobp->AddPair(g, key);
+ level = 1;
} else {
sprintf(g->Message, "misplaced string near %.*s", ARGS);
- return NULL;
+ throw 2;
} // endif level
break;
case ':':
if (level == 1) {
- if (!(jpp->Val = ParseValue(g, ++i, src, pty)))
- return NULL;
-
+ jpp->Val = ParseValue(g, ++i);
level = 2;
} else {
sprintf(g->Message, "Unexpected ':' near %.*s", ARGS);
- return NULL;
+ throw 2;
} // endif level
break;
case ',':
if (level < 2) {
sprintf(g->Message, "Unexpected ',' near %.*s", ARGS);
- return NULL;
+ throw 2;
} else
- level = 1;
+ level = 0;
break;
case '}':
- if (level == 1) {
+ if (level < 2) {
sprintf(g->Message, "Unexpected '}' near %.*s", ARGS);
- return NULL;
+ throw 2;
} // endif level
return jobp;
@@ -324,20 +319,19 @@ PJOB ParseObject(PGLOBAL g, int& i, STRG& src, bool *pty)
default:
sprintf(g->Message, "Unexpected character '%c' near %.*s",
s[i], ARGS);
- return NULL;
+ throw 2;
}; // endswitch s[i]
strcpy(g->Message, "Unexpected EOF in Object");
- return NULL;
+ throw 2;
} // end of ParseObject
/***********************************************************************/
/* Parse a JSON Value. */
/***********************************************************************/
-PJVAL ParseValue(PGLOBAL g, int& i, STRG& src, bool *pty)
+PJVAL JSON::ParseValue(PGLOBAL g, int& i)
{
- char *strval, *s = src.str;
- int n, len = src.len;
+ int n;
PJVAL jvp = new(g) JVALUE;
for (; i < len; i++)
@@ -355,21 +349,13 @@ PJVAL ParseValue(PGLOBAL g, int& i, STRG& src, bool *pty)
suite:
switch (s[i]) {
case '[':
- if (!(jvp->Jsp = ParseArray(g, ++i, src, pty)))
- return NULL;
-
+ jvp->Jsp = ParseArray(g, ++i);
break;
case '{':
- if (!(jvp->Jsp = ParseObject(g, ++i, src, pty)))
- return NULL;
-
+ jvp->Jsp = ParseObject(g, ++i);
break;
case '"':
- if ((strval = ParseString(g, ++i, src)))
- jvp->Value = AllocateValue(g, strval, TYPE_STRING);
- else
- return NULL;
-
+ jvp->Value = AllocateValue(g, ParseString(g, ++i), TYPE_STRING);
break;
case 't':
if (!strncmp(s + i, "true", 4)) {
@@ -398,11 +384,9 @@ PJVAL ParseValue(PGLOBAL g, int& i, STRG& src, bool *pty)
break;
case '-':
default:
- if (s[i] == '-' || isdigit(s[i])) {
- if (!(jvp->Value = ParseNumeric(g, i, src)))
- goto err;
-
- } else
+ if (s[i] == '-' || isdigit(s[i]))
+ jvp->Value = ParseNumeric(g, i);
+ else
goto err;
}; // endswitch s[i]
@@ -410,25 +394,21 @@ PJVAL ParseValue(PGLOBAL g, int& i, STRG& src, bool *pty)
return jvp;
err:
- sprintf(g->Message, "Unexpected character '%c' near %.*s",
- s[i], ARGS);
- return NULL;
+ sprintf(g->Message, "Unexpected character '%c' near %.*s", s[i], ARGS);
+ throw 3;
} // end of ParseValue
/***********************************************************************/
/* Unescape and parse a JSON string. */
/***********************************************************************/
-char *ParseString(PGLOBAL g, int& i, STRG& src)
+char *JSON::ParseString(PGLOBAL g, int& i)
{
- char *s = src.str;
uchar *p;
- int n = 0, len = src.len;
+ int n = 0;
// Be sure of memory availability
- if (len + 1 - i > (signed)((PPOOLHEADER)g->Sarea)->FreeBlk) {
- strcpy(g->Message, "ParseString: Out of memory");
- return NULL;
- } // endif len
+ if (((size_t)len + 1 - i) > ((PPOOLHEADER)g->Sarea)->FreeBlk)
+ throw("ParseString: Out of memory");
// The size to allocate is not known yet
p = (uchar*)PlugSubAlloc(g, NULL, 0);
@@ -502,17 +482,16 @@ char *ParseString(PGLOBAL g, int& i, STRG& src)
}; // endswitch s[i]
err:
- strcpy(g->Message, "Unexpected EOF in String");
- return NULL;
+ throw("Unexpected EOF in String");
} // end of ParseString
/***********************************************************************/
/* Parse a JSON numeric value. */
/***********************************************************************/
-PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src)
+PVAL JSON::ParseNumeric(PGLOBAL g, int& i)
{
- char *s = src.str, buf[50];
- int n = 0, len = src.len;
+ char buf[50];
+ int n = 0;
short nd = 0;
bool has_dot = false;
bool has_e = false;
@@ -575,14 +554,11 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src)
i--; // Unstack following character
return valp;
- } else {
- strcpy(g->Message, "No digit found");
- return NULL;
- } // endif found_digit
+ } else
+ throw("No digit found");
err:
- strcpy(g->Message, "Unexpected EOF in number");
- return NULL;
+ throw("Unexpected EOF in number");
} // end of ParseNumeric
/***********************************************************************/
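The json.cpp rewrite above turns ParseArray, ParseObject, ParseValue, ParseString and ParseNumeric into JSON members and switches their error reporting from returning NULL to throwing: either an int code (after formatting g->Message) or a literal message string; ParseJson catches both and leaves the text in g->Message. A minimal sketch of that convention with simplified stand-ins for the real types:

  #include <cstdio>
  #include <cstring>

  // Simplified stand-ins, to show the throw/catch convention only.
  struct Global { char Message[256]; };

  static void ParseSomething(bool fail_with_code) {
    if (fail_with_code)
      throw 2;                            // numeric code; message already set by thrower
    throw "Unexpected EOF in String";     // literal message
  }

  static bool ParseTop(Global* g, bool fail_with_code) {
    try {
      ParseSomething(fail_with_code);
      return true;
    } catch (int n) {
      printf("Exception %d: %s\n", n, g->Message);  // message was filled before the throw
    } catch (const char* msg) {
      strcpy(g->Message, msg);                       // copy the literal into g->Message
    }
    return false;
  }

  int main() {
    Global g;
    strcpy(g.Message, "Unexpected ',' near ...");
    ParseTop(&g, true);
    ParseTop(&g, false);
    printf("%s\n", g.Message);
    return 0;
  }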
diff --git a/storage/connect/json.h b/storage/connect/json.h
index 1d058ad575f..bc94b372133 100644
--- a/storage/connect/json.h
+++ b/storage/connect/json.h
@@ -1,10 +1,11 @@
/**************** json H Declares Source Code File (.H) ****************/
/* Name: json.h Version 1.2 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */
/* */
/* This file contains the JSON classes declares. */
/***********************************************************************/
+#include <mysql_com.h>
#include "value.h"
#if defined(_DEBUG)
@@ -44,15 +45,31 @@ typedef struct {
int len;
} STRG, *PSG;
+// BSON size should be equal on Linux and Windows
+#define BMX 255
+typedef struct BSON* PBSON;
+
+/***********************************************************************/
+/* Structure used to return binary json to Json UDF functions. */
+/***********************************************************************/
+struct BSON {
+ char Msg[BMX + 1];
+ char *Filename;
+ PGLOBAL G;
+ int Pretty;
+ ulong Reslen;
+ my_bool Changed;
+ PJSON Top;
+ PJSON Jsp;
+ PBSON Bsp;
+}; // end of struct BSON
+
+PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp);
+
char *NextChr(PSZ s, char sep);
char *GetJsonNull(void);
-PJSON ParseJson(PGLOBAL g, char *s, int n, int *prty = NULL, bool *b = NULL);
-PJAR ParseArray(PGLOBAL g, int& i, STRG& src, bool *pty);
-PJOB ParseObject(PGLOBAL g, int& i, STRG& src, bool *pty);
-PJVAL ParseValue(PGLOBAL g, int& i, STRG& src, bool *pty);
-char *ParseString(PGLOBAL g, int& i, STRG& src);
-PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src);
+PJSON ParseJson(PGLOBAL g, char* s, int n, int* prty = NULL, bool* b = NULL);
PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty);
bool SerializeArray(JOUT *js, PJAR jarp, bool b);
bool SerializeObject(JOUT *js, PJOB jobp);
@@ -130,7 +147,7 @@ class JOUTPRT : public JOUTFILE {
class JPAIR : public BLOCK {
friend class JOBJECT;
friend class JSNX;
- friend PJOB ParseObject(PGLOBAL, int&, STRG&, bool*);
+ friend class JSON;
friend bool SerializeObject(JOUT *, PJOB);
public:
JPAIR(PCSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;}
@@ -149,8 +166,9 @@ class JPAIR : public BLOCK {
/* Class JSON. The base class for all other json classes. */
/***********************************************************************/
class JSON : public BLOCK {
+ friend PJSON ParseJson(PGLOBAL, char*, int, int*, bool*);
public:
- JSON(void) {Size = 0;}
+ JSON(void) : s(NULL), len(0), pty(NULL) {Size = 0;}
int size(void) {return Size;}
virtual int GetSize(bool b) {return Size;}
@@ -187,14 +205,27 @@ class JSON : public BLOCK {
virtual bool IsNull(void) {X return true;}
protected:
- int Size;
+ PJAR ParseArray(PGLOBAL g, int& i);
+ PJOB ParseObject(PGLOBAL g, int& i);
+ PJVAL ParseValue(PGLOBAL g, int& i);
+ char *ParseString(PGLOBAL g, int& i);
+ PVAL ParseNumeric(PGLOBAL g, int& i);
+ PJAR ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp);
+
+ // Members
+ int Size;
+
+ // Only used when parsing
+ private:
+ char *s;
+ int len;
+ bool *pty;
}; // end of class JSON
/***********************************************************************/
/* Class JOBJECT: contains a list of value pairs. */
/***********************************************************************/
class JOBJECT : public JSON {
- friend PJOB ParseObject(PGLOBAL, int&, STRG&, bool*);
friend bool SerializeObject(JOUT *, PJOB);
friend class JSNX;
public:
@@ -260,8 +291,8 @@ class JVALUE : public JSON {
friend class JARRAY;
friend class JSNX;
friend class JSONCOL;
- friend PJVAL ParseValue(PGLOBAL, int&, STRG&, bool*);
- friend bool SerializeValue(JOUT *, PJVAL);
+ friend class JSON;
+ friend bool SerializeValue(JOUT*, PJVAL);
public:
JVALUE(void) : JSON() {Clear();}
JVALUE(PJSON jsp);
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 940e7e678fd..44028a32564 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -25,7 +25,7 @@
#else
#define PUSH_WARNING(M) htrc(M)
#endif
-#define M 7
+#define M 9
bool IsNum(PSZ s);
char *NextChr(PSZ s, char sep);
@@ -1076,29 +1076,10 @@ my_bool JSNX::AddPath(void)
/* --------------------------------- JSON UDF ---------------------------------- */
-// BSON size should be equal on Linux and Windows
-#define BMX 255
-typedef struct BSON *PBSON;
-
-/*********************************************************************************/
-/* Structure used to return binary json. */
-/*********************************************************************************/
-struct BSON {
- char Msg[BMX + 1];
- char *Filename;
- PGLOBAL G;
- int Pretty;
- ulong Reslen;
- my_bool Changed;
- PJSON Top;
- PJSON Jsp;
- PBSON Bsp;
-}; // end of struct BSON
-
/*********************************************************************************/
/* Allocate and initialize a BSON structure. */
/*********************************************************************************/
-static PBSON JbinAlloc(PGLOBAL g, UDF_ARGS *args, ulong len, PJSON jsp)
+PBSON JbinAlloc(PGLOBAL g, UDF_ARGS *args, ulong len, PJSON jsp)
{
PBSON bsp = (PBSON)PlgDBSubAlloc(g, NULL, sizeof(BSON));
@@ -1111,7 +1092,7 @@ static PBSON JbinAlloc(PGLOBAL g, UDF_ARGS *args, ulong len, PJSON jsp)
bsp->Reslen = len;
bsp->Changed = false;
bsp->Top = bsp->Jsp = jsp;
- bsp->Bsp = (IsJson(args, 0) == 3) ? (PBSON)args->args[0] : NULL;
+ bsp->Bsp = (args && IsJson(args, 0) == 3) ? (PBSON)args->args[0] : NULL;
} else
PUSH_WARNING(g->Message);
@@ -1144,7 +1125,7 @@ static my_bool JsonSubSet(PGLOBAL g)
{
PPOOLHEADER pph = (PPOOLHEADER)g->Sarea;
- pph->To_Free = (OFFSET)((g->Createas) ? g->Createas : sizeof(POOLHEADER));
+ pph->To_Free = (g->Saved_Size) ? g->Saved_Size : (size_t)sizeof(POOLHEADER);
pph->FreeBlk = g->Sarea_Size - pph->To_Free;
return FALSE;
} /* end of JsonSubSet */
@@ -1154,7 +1135,7 @@ static my_bool JsonSubSet(PGLOBAL g)
/*********************************************************************************/
inline void JsonMemSave(PGLOBAL g)
{
- g->Createas = (int)((PPOOLHEADER)g->Sarea)->To_Free;
+ g->Saved_Size = ((PPOOLHEADER)g->Sarea)->To_Free;
} /* end of JsonMemSave */
/*********************************************************************************/
@@ -1422,7 +1403,7 @@ static int IsJson(UDF_ARGS *args, uint i, bool b)
n = 2; // arg is a json file name
} else if (b) {
char *sap;
- PGLOBAL g = PlugInit(NULL, args->lengths[i] * M + 1024);
+ PGLOBAL g = PlugInit(NULL, (size_t)args->lengths[i] * M + 1024);
JsonSubSet(g);
sap = MakePSZ(g, args, i);
@@ -1625,7 +1606,7 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
return true;
} // endif SareaAlloc
- g->Createas = 0;
+ g->Saved_Size = 0;
g->Xchk = NULL;
initid->max_length = rl;
} // endif Size
@@ -4425,13 +4406,15 @@ char *json_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
fn = MakePSZ(g, args, 0);
if (args->arg_count > 1) {
- int len, pretty, pty = 3;
+ int len, pretty = 3, pty = 3;
PJSON jsp;
PJVAL jvp = NULL;
- pretty = (args->arg_type[1] == INT_RESULT) ? (int)*(longlong*)args->args[1]
- : (args->arg_count > 2 && args->arg_type[2] == INT_RESULT)
- ? (int)*(longlong*)args->args[2] : 3;
+ for (unsigned int i = 1; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) {
+ pretty = (int) * (longlong*)args->args[i];
+ break;
+ } // endif type
/*******************************************************************************/
/* Parse the json file and allocate its tree structure. */
@@ -4499,6 +4482,7 @@ my_bool jfile_make_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} // endif
CalcLen(args, false, reslen, memlen);
+ memlen = memlen + 5000; // To take care of not pretty files
return JsonInit(initid, args, message, true, reslen, memlen);
} // end of jfile_make_init
@@ -5628,20 +5612,19 @@ my_bool jbin_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else if (args->arg_type[0] != STRING_RESULT || !args->args[0]) {
strcpy(message, "First argument must be a constant string (file name)");
return true;
- } else if (args->arg_count > 1 && args->arg_type[1] != STRING_RESULT) {
- strcpy(message, "Second argument is not a string (path)");
- return true;
- } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) {
- strcpy(message, "Third argument is not an integer (pretty)");
- return true;
- } else if (args->arg_count > 3) {
- if (args->arg_type[3] != INT_RESULT) {
- strcpy(message, "Fourth argument is not an integer (memory)");
+ } // endifs
+
+ for (unsigned int i = 1; i < args->arg_count; i++) {
+ if (!(args->arg_type[i] == INT_RESULT || args->arg_type[i] == STRING_RESULT)) {
+ sprintf(message, "Argument %d is not an integer or a string (pretty or path)", i);
return true;
- } else
- more += (ulong)*(longlong*)args->args[3];
+ } // endif arg_type
- } // endifs
+ // Take care of eventual memory argument
+ if (args->arg_type[i] == INT_RESULT && args->args[i])
+ more += (ulong) * (longlong*)args->args[i];
+
+ } // endfor i
initid->maybe_null = 1;
CalcLen(args, false, reslen, memlen);
@@ -5656,7 +5639,7 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
{
char *fn;
- int pretty, len = 0, pty = 3;
+ int pretty = 3, len = 0, pty = 3;
PJSON jsp;
PJVAL jvp = NULL;
PGLOBAL g = (PGLOBAL)initid->ptr;
@@ -5668,7 +5651,12 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
PlugSubSet(g->Sarea, g->Sarea_Size);
g->Xchk = NULL;
fn = MakePSZ(g, args, 0);
- pretty = (args->arg_count > 2 && args->args[2]) ? (int)*(longlong*)args->args[2] : 3;
+
+ for (unsigned int i = 1; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) {
+ pretty = (int) * (longlong*)args->args[i];
+ break;
+ } // endif type
/*********************************************************************************/
/* Parse the json file and allocate its tree structure. */
@@ -5759,7 +5747,7 @@ char *json_serialize(UDF_INIT *initid, UDF_ARGS *args, char *result,
// Keep result of constant function
g->Xchk = (initid->const_item) ? str : NULL;
} else {
- *error = 1;
+ // *error = 1;
str = strcpy(result, "Argument is not a Jbin tree");
} // endif
@@ -5776,6 +5764,474 @@ void json_serialize_deinit(UDF_INIT* initid)
} // end of json_serialize_deinit
/*********************************************************************************/
+/* Convert a prettiest Json file to Pretty=0. */
+/*********************************************************************************/
+my_bool jfile_convert_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 3) {
+ strcpy(message, "This function must have 3 arguments");
+ return true;
+ } else if (args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third Argument must be an integer (LRECL)");
+ return true;
+ } else for (int i = 0; i < 2; i++)
+ if (args->arg_type[i] != STRING_RESULT) {
+ sprintf(message, "Arguments %d must be a string (file name)", i+1);
+ return true;
+ } // endif args
+
+ CalcLen(args, false, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of jfile_convert_init
+
+char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long *res_length, char *, char *error) {
+ char *str, *fn, *ofn;
+ int lrecl = (int)*(longlong*)args->args[2];
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ PlugSubSet(g->Sarea, g->Sarea_Size);
+ fn = MakePSZ(g, args, 0);
+ ofn = MakePSZ(g, args, 1);
+
+ if (!g->Xchk) {
+ JUP* jup = new(g) JUP(g);
+
+ str = jup->UnprettyJsonFile(g, fn, ofn, lrecl);
+ g->Xchk = str;
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ str = PlugDup(g, g->Message);
+ } // endif str
+
+ *res_length = strlen(str);
+ return str;
+} // end of jfile_convert
+
+void jfile_convert_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of jfile_convert_deinit
+
+/* --------------------------------- Class JUP --------------------------------- */
+
+#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0)
+
+/*********************************************************************************/
+/* JUP public constructor. */
+/*********************************************************************************/
+JUP::JUP(PGLOBAL g) {
+ fs = NULL;
+ s = buff = NULL;
+ i = k = len = recl = 0;
+} // end of JUP constructor
+
+/*********************************************************************************/
+/* Copy a json file to another with pretty = 0. */
+/*********************************************************************************/
+char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
+ char *ret = NULL;
+ HANDLE hFile;
+ MEMMAP mm;
+
+ /*******************************************************************************/
+ /* Create the mapping file object. */
+ /*******************************************************************************/
+ hFile = CreateFileMap(g, fn, &mm, MODE_READ, false);
+
+ if (hFile == INVALID_HANDLE_VALUE) {
+ DWORD rc = GetLastError();
+
+ if (!(*g->Message))
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR), "map", (int)rc, fn);
+
+ return NULL;
+ } // endif hFile
+
+ /*******************************************************************************/
+ /* Get the file size (assuming file is smaller than 4 GB) */
+ /*******************************************************************************/
+ if (!mm.lenL) { // Empty or deleted file
+ CloseFileHandle(hFile);
+ return NULL;
+ } else
+ len = (int)mm.lenL;
+
+ if (!mm.memory) {
+ CloseFileHandle(hFile);
+ sprintf(g->Message, MSG(MAP_VIEW_ERROR), fn, GetLastError());
+ return NULL;
+ } else
+ s = (char*)mm.memory;
+
+ CloseFileHandle(hFile); // Not used anymore
+
+ /*********************************************************************************/
+ /* Parse the json file and allocate its tree structure. */
+ /*********************************************************************************/
+ if (!(fs = fopen(outfn, "wb"))) {
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR),
+ "w", (int)errno, outfn);
+ strcat(strcat(g->Message, ": "), strerror(errno));
+ CloseMemMap(mm.memory, (size_t)mm.lenL);
+ return NULL;
+ } // endif fs
+
+ g->Message[0] = 0;
+
+ if (!unPretty(g, lrecl))
+ ret = outfn;
+
+ CloseMemMap(mm.memory, (size_t)mm.lenL);
+ fclose(fs);
+ return ret;
+} // end of UnprettyJsonFile
+
+/***********************************************************************/
+/* Translate a json file to pretty = 0. */
+/***********************************************************************/
+bool JUP::unPretty(PGLOBAL g, int lrecl) {
+ bool go, next, rc = false;
+
+ if (trace(1))
+ htrc("UnPretty: s=%.10s len=%zd lrecl=%d\n", s, len, lrecl);
+
+ if (!s || !len) {
+ strcpy(g->Message, "Void JSON file");
+ return true;
+ } else if (*s != '[') {
+ // strcpy(g->Message, "JSON file is not an array");
+ s = strchr(s, '[');
+ // return true;
+ } // endif s
+
+ i = 1;
+ go = next = true;
+
+ try {
+ // Allocate the record
+ buff = (char*)PlugSubAlloc(g, NULL, (size_t)lrecl + 3);
+ recl = lrecl;
+
+ do {
+ for (k = 0; go && i < len; i++)
+ switch (s[i]) {
+ case '{':
+ buff[k++] = s[i++];
+ CopyObject(g);
+ break;
+ case '[':
+ throw "JSON file is not an array of objects";
+ break;
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ case ',':
+ go = false;
+ break;
+ case ']':
+ go = next = false;
+ break;
+ default:
+ sprintf(g->Message, "Unexpected '%c' near %.*s", s[i], ARGS);
+ throw 4;
+ break;
+ }; // endswitch s[i]
+
+ // Write the record
+#ifdef __win_
+ buff[k++] = '\r';
+#endif
+ buff[k++] = '\n';
+ buff[k] = 0;
+
+ if ((fputs(buff, fs)) == EOF) {
+ sprintf(g->Message, MSG(FPUTS_ERROR), strerror(errno));
+ throw 5;
+ } // endif EOF
+
+ go = true;
+ } while (next);
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+ rc = true;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ rc = true;
+ } // end catch
+
+ return rc;
+} // end of unPretty
+
+/***********************************************************************/
+/* Copy a JSON Object. */
+/***********************************************************************/
+void JUP::CopyObject(PGLOBAL g) {
+ int level = 0;
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '"':
+ AddBuff(s[i++]);
+
+ if (level < 2) {
+ CopyString(g);
+ level = 1;
+ } else {
+ sprintf(g->Message, "misplaced string near %.*s", ARGS);
+ throw 3;
+ } // endif level
+
+ break;
+ case ':':
+ AddBuff(s[i++]);
+
+ if (level == 1) {
+ CopyValue(g);
+ level = 2;
+ } else {
+ sprintf(g->Message, "Unexpected ':' near %.*s", ARGS);
+ throw 3;
+ } // endif level
+
+ break;
+ case ',':
+ AddBuff(s[i]);
+
+ if (level < 2) {
+ sprintf(g->Message, "Unexpected ',' near %.*s", ARGS);
+ throw 3;
+ } else
+ level = 0;
+
+ break;
+ case '}':
+ AddBuff(s[i]);
+
+ if (level == 1) {
+ sprintf(g->Message, "Unexpected '}' near %.*s", ARGS);
+ throw 3;
+ } // endif level
+
+ return;
+ case '\n':
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ sprintf(g->Message, "Unexpected character '%c' near %.*s", s[i], ARGS);
+ throw 3;
+ }; // endswitch s[i]
+
+ throw "Unexpected EOF in Object";
+} // end of CopyObject
+
+/***********************************************************************/
+/* Copy a JSON Array. */
+/***********************************************************************/
+void JUP::CopyArray(PGLOBAL g) {
+ int level = 0;
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case ',':
+ if (level < 2) {
+ sprintf(g->Message, "Unexpected ',' near %.*s", ARGS);
+ throw 2;
+ } else
+ level = 1;
+
+ AddBuff(s[i]);
+ break;
+ case ']':
+ if (level == 1) {
+ sprintf(g->Message, "Unexpected ',]' near %.*s", ARGS);
+ throw 2;
+ } // endif level
+
+ AddBuff(s[i]);
+ return;
+ case '\n':
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ if (level == 2) {
+ sprintf(g->Message, "Unexpected value near %.*s", ARGS);
+ throw 2;
+ } // endif level
+
+ CopyValue(g);
+ level = 2;
+ break;
+ }; // endswitch s[i]
+
+ throw "Unexpected EOF in array";
+} // end of CopyArray
+
+/***********************************************************************/
+/* Copy a JSON Value. */
+/***********************************************************************/
+void JUP::CopyValue(PGLOBAL g) {
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '\n':
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ goto suite;
+ } // endswitch
+
+suite:
+ switch (s[i]) {
+ case '[':
+ AddBuff(s[i++]);
+ CopyArray(g);
+ break;
+ case '{':
+ AddBuff(s[i++]);
+ CopyObject(g);
+ break;
+ case '"':
+ AddBuff(s[i++]);
+ CopyString(g);
+ break;
+ case 't':
+ if (!strncmp(s + i, "true", 4)) {
+ AddBuff(s[i++]);
+ AddBuff(s[i++]);
+ AddBuff(s[i++]);
+ AddBuff(s[i]);
+ } else
+ goto err;
+
+ break;
+ case 'f':
+ if (!strncmp(s + i, "false", 5)) {
+ AddBuff(s[i++]);
+ AddBuff(s[i++]);
+ AddBuff(s[i++]);
+ AddBuff(s[i++]);
+ AddBuff(s[i]);
+ } else
+ goto err;
+
+ break;
+ case 'n':
+ if (!strncmp(s + i, "null", 4)) {
+ AddBuff(s[i++]);
+ AddBuff(s[i++]);
+ AddBuff(s[i++]);
+ AddBuff(s[i]);
+ } else
+ goto err;
+
+ break;
+ default:
+ if (s[i] == '-' || isdigit(s[i]))
+ CopyNumeric(g);
+ else
+ goto err;
+
+ }; // endswitch s[i]
+
+ return;
+
+err:
+ sprintf(g->Message, "Unexpected character '%c' near %.*s", s[i], ARGS);
+ throw 1;
+} // end of CopyValue
+
+/***********************************************************************/
+/* Unescape and parse a JSON string. */
+/***********************************************************************/
+void JUP::CopyString(PGLOBAL g) {
+ for (; i < len; i++) {
+ AddBuff(s[i]);
+
+ switch (s[i]) {
+ case '"':
+ return;
+ case '\\':
+ AddBuff(s[++i]);
+ break;
+ default:
+ break;
+ }; // endswitch s[i]
+
+ } // endfor i
+
+ throw "Unexpected EOF in String";
+} // end of CopyString
+
+/***********************************************************************/
+/* Copy a JSON numeric value. */
+/***********************************************************************/
+void JUP::CopyNumeric(PGLOBAL g) {
+ bool has_dot = false;
+ bool has_e = false;
+ bool found_digit = false;
+
+ for (; i < len; i++) {
+ switch (s[i]) {
+ case '.':
+ if (!found_digit || has_dot || has_e)
+ goto err;
+
+ has_dot = true;
+ break;
+ case 'e':
+ case 'E':
+ if (!found_digit || has_e)
+ goto err;
+
+ has_e = true;
+ found_digit = false;
+ break;
+ case '+':
+ if (!has_e)
+ goto err;
+
+ // fall through
+ case '-':
+ if (found_digit)
+ goto err;
+
+ break;
+ default:
+ if (isdigit(s[i])) {
+ found_digit = true;
+ } else
+ goto fin;
+
+ }; // endswitch s[i]
+
+ AddBuff(s[i]);
+ } // endfor i
+
+fin:
+ if (!found_digit)
+ throw "No digit found";
+ else
+ i--;
+
+ return;
+
+err:
+ throw "Unexpected EOF in number";
+} // end of CopyNumeric
+
+/*********************************************************************************/
/* Utility function returning an environment variable value. */
/*********************************************************************************/
my_bool envar_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
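The new jfile_convert UDF above maps the input file, walks it with the JUP class, and writes each top-level object as one LRECL-bounded record, i.e. it converts a pretty-printed file to pretty=0. A minimal sketch of the same idea on an in-memory string, assuming a valid array of objects and ignoring the file mapping and record-length handling:

  #include <cctype>
  #include <cstdio>
  #include <string>

  // Illustrative only: rewrite a pretty-printed JSON array of objects as
  // one object per line, keeping whitespace that lives inside string values.
  static std::string Unpretty(const std::string& in) {
    std::string out;
    bool instr = false;   // inside a quoted string
    int depth = 0;        // nesting below the outer array
    for (size_t i = 0; i < in.size(); i++) {
      char c = in[i];
      if (instr) {
        out += c;
        if (c == '\\' && i + 1 < in.size()) out += in[++i];  // keep escapes intact
        else if (c == '"') instr = false;
      } else if (c == '"') {
        instr = true; out += c;
      } else if (depth == 0) {
        // Between top-level objects: drop '[', ']', ',' and whitespace.
        if (c == '{') { depth = 1; out += c; }
      } else {
        if (isspace((unsigned char)c)) continue;              // pretty-print filler
        out += c;
        if (c == '{' || c == '[') depth++;
        else if (c == '}' || c == ']') {
          if (--depth == 0) out += '\n';                       // end of one record
        }
      }
    }
    return out;
  }

  int main() {
    std::string pretty =
      "[\n  { \"a\": 1,\n    \"b\": [1, 2] },\n  { \"a\": 2 }\n]\n";
    printf("%s", Unpretty(pretty).c_str());
    // Prints:
    // {"a":1,"b":[1,2]}
    // {"a":2}
    return 0;
  }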
diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h
index ee56869a111..897b0fe9919 100644
--- a/storage/connect/jsonudf.h
+++ b/storage/connect/jsonudf.h
@@ -235,6 +235,10 @@ extern "C" {
DllExport char *json_serialize(UDF_EXEC_ARGS);
DllExport void json_serialize_deinit(UDF_INIT*);
+ DllExport my_bool jfile_convert_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* jfile_convert(UDF_EXEC_ARGS);
+ DllExport void jfile_convert_deinit(UDF_INIT*);
+
DllExport my_bool envar_init(UDF_INIT*, UDF_ARGS*, char*);
DllExport char *envar(UDF_EXEC_ARGS);
@@ -324,3 +328,38 @@ protected:
my_bool Wr; // Write mode
my_bool Jb; // Must return json item
}; // end of class JSNX
+
+/*********************************************************************************/
+/* Class JUP: used by jfile_convert to make a json file pretty = 0. */
+/*********************************************************************************/
+class JUP : public BLOCK {
+public:
+ // Constructor
+ JUP(PGLOBAL g);
+
+ // Implementation
+ void AddBuff(char c) {
+ if (k < recl)
+ buff[k++] = c;
+ else
+ throw "Record size is too small";
+ } // end of AddBuff
+
+ // Methods
+ char *UnprettyJsonFile(PGLOBAL g, char* fn, char* outfn, int lrecl);
+ bool unPretty(PGLOBAL g, int lrecl);
+ void CopyObject(PGLOBAL g);
+ void CopyArray(PGLOBAL g);
+ void CopyValue(PGLOBAL g);
+ void CopyString(PGLOBAL g);
+ void CopyNumeric(PGLOBAL g);
+
+ // Members
+ FILE* fs;
+ char* s;
+ char* buff;
+ int len;
+ int recl;
+ int i, k;
+}; // end of class JUP
+
diff --git a/storage/connect/mongo.cpp b/storage/connect/mongo.cpp
index bd3d3b893c1..5f10a89ee67 100644
--- a/storage/connect/mongo.cpp
+++ b/storage/connect/mongo.cpp
@@ -35,6 +35,7 @@
bool MakeSelector(PGLOBAL g, PFIL fp, PSTRG s);
bool IsNum(PSZ s);
+int GetDefaultDepth(void);
/***********************************************************************/
/* Make selector json representation for Mongo tables. */
@@ -248,15 +249,10 @@ MGODISC::MGODISC(PGLOBAL g, int *lg) {
/***********************************************************************/
int MGODISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ uri, PTOS topt)
{
- PCSZ level = GetStringTableOption(g, topt, "Level", NULL);
PMGODEF tdp;
- if (level) {
- lvl = atoi(level);
- lvl = (lvl > 16) ? 16 : lvl;
- } else
- lvl = 0;
-
+ lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
+ lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
all = GetBooleanTableOption(g, topt, "Fullarray", false);
/*********************************************************************/
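Mongo discovery above now takes its default depth from the new default_depth session variable (described as the default for JSON, XML and Mongo discovery), and the Level and Depth table options can still override it; Depth is read last, so it wins when both are given. A small sketch of that resolution order, using a hypothetical option-lookup helper:

  #include <cstdio>
  #include <map>
  #include <string>

  // Hypothetical option lookup: returns 'def' when the option is absent.
  static int GetIntOption(const std::map<std::string, int>& opts,
                          const std::string& name, int def) {
    auto it = opts.find(name);
    return it == opts.end() ? def : it->second;
  }

  int main() {
    int default_depth = 5;                        // session default
    std::map<std::string, int> opts = {{"Level", 2}};

    int lvl = GetIntOption(opts, "Level", default_depth);
    lvl     = GetIntOption(opts, "Depth", lvl);   // Depth overrides Level
    printf("depth=%d\n", lvl);                    // depth=2
    return 0;
  }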
diff --git a/storage/connect/mysql-test/connect/r/json_java_2.result b/storage/connect/mysql-test/connect/r/json_java_2.result
index 4bbac236200..47fc4abbd28 100644
--- a/storage/connect/mysql-test/connect/r/json_java_2.result
+++ b/storage/connect/mysql-test/connect/r/json_java_2.result
@@ -20,12 +20,12 @@ SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
_id 1 CHAR 24 24 0 0 _id
address_building 1 CHAR 10 10 0 0 address.building
-address_coord 1 CHAR 256 256 0 1 address.coord
+address_coord 1 CHAR 1024 1024 0 1 address.coord
address_street 1 CHAR 38 38 0 0 address.street
address_zipcode 1 CHAR 5 5 0 0 address.zipcode
borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
-grades_date 1 CHAR 256 256 0 1 grades.0.date
+grades_date 1 CHAR 1024 1024 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
grades_score 5 BIGINT 2 2 0 1 grades.0.score
name 1 CHAR 98 98 0 0
@@ -64,16 +64,16 @@ OPTION_LIST='Level=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:2701
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `_id` char(24) NOT NULL `FIELD_FORMAT`='_id',
- `address_building` char(10) NOT NULL `FIELD_FORMAT`='address.building',
- `address_coord` varchar(256) DEFAULT NULL `FIELD_FORMAT`='address.coord',
- `address_street` char(38) NOT NULL `FIELD_FORMAT`='address.street',
- `address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
`borough` char(13) NOT NULL,
`cuisine` char(64) NOT NULL,
- `grades_date` varchar(256) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
- `grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
+ `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096
@@ -251,15 +251,15 @@ OPTION_LIST='Driver=Java,level=2,version=2';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `_id` char(24) NOT NULL `FIELD_FORMAT`='_id',
- `address_building` char(10) NOT NULL `FIELD_FORMAT`='address.building',
- `address_coord` double(18,16) DEFAULT NULL `FIELD_FORMAT`='address.coord.0',
- `address_street` char(38) NOT NULL `FIELD_FORMAT`='address.street',
- `address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
`borough` char(13) NOT NULL,
- `grades_date` char(24) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
- `grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
+ `grades_date` char(24) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=2' `LRECL`=4096
diff --git a/storage/connect/mysql-test/connect/r/json_java_3.result b/storage/connect/mysql-test/connect/r/json_java_3.result
index eb8bfc022d6..720c82cd7f9 100644
--- a/storage/connect/mysql-test/connect/r/json_java_3.result
+++ b/storage/connect/mysql-test/connect/r/json_java_3.result
@@ -20,12 +20,12 @@ SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
_id 1 CHAR 24 24 0 0 _id
address_building 1 CHAR 10 10 0 0 address.building
-address_coord 1 CHAR 256 256 0 1 address.coord
+address_coord 1 CHAR 1024 1024 0 1 address.coord
address_street 1 CHAR 38 38 0 0 address.street
address_zipcode 1 CHAR 5 5 0 0 address.zipcode
borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
-grades_date 1 CHAR 256 256 0 1 grades.0.date
+grades_date 1 CHAR 1024 1024 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
grades_score 5 BIGINT 2 2 0 1 grades.0.score
name 1 CHAR 98 98 0 0
@@ -64,16 +64,16 @@ OPTION_LIST='Level=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:2701
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `_id` char(24) NOT NULL `FIELD_FORMAT`='_id',
- `address_building` char(10) NOT NULL `FIELD_FORMAT`='address.building',
- `address_coord` varchar(256) DEFAULT NULL `FIELD_FORMAT`='address.coord',
- `address_street` char(38) NOT NULL `FIELD_FORMAT`='address.street',
- `address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
`borough` char(13) NOT NULL,
`cuisine` char(64) NOT NULL,
- `grades_date` varchar(256) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
- `grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
+ `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096
@@ -251,15 +251,15 @@ OPTION_LIST='Driver=Java,level=2,version=3';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `_id` char(24) NOT NULL `FIELD_FORMAT`='_id',
- `address_building` char(10) NOT NULL `FIELD_FORMAT`='address.building',
- `address_coord` double(18,16) DEFAULT NULL `FIELD_FORMAT`='address.coord.0',
- `address_street` char(38) NOT NULL `FIELD_FORMAT`='address.street',
- `address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
`borough` char(13) NOT NULL,
- `grades_date` bigint(13) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
- `grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
+ `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=3' `LRECL`=4096
diff --git a/storage/connect/mysql-test/connect/r/json_mongo_c.result b/storage/connect/mysql-test/connect/r/json_mongo_c.result
index 550e94f286e..f9bfc01763e 100644
--- a/storage/connect/mysql-test/connect/r/json_mongo_c.result
+++ b/storage/connect/mysql-test/connect/r/json_mongo_c.result
@@ -20,12 +20,12 @@ SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
_id 1 CHAR 24 24 0 0 _id
address_building 1 CHAR 10 10 0 0 address.building
-address_coord 1 CHAR 256 256 0 1 address.coord
+address_coord 1 CHAR 1024 1024 0 1 address.coord
address_street 1 CHAR 38 38 0 0 address.street
address_zipcode 1 CHAR 5 5 0 0 address.zipcode
borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
-grades_date 1 CHAR 256 256 0 1 grades.0.date
+grades_date 1 CHAR 1024 1024 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
grades_score 5 BIGINT 2 2 0 1 grades.0.score
name 1 CHAR 98 98 0 0
@@ -64,16 +64,16 @@ OPTION_LIST='Level=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017'
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `_id` char(24) NOT NULL `FIELD_FORMAT`='_id',
- `address_building` char(10) NOT NULL `FIELD_FORMAT`='address.building',
- `address_coord` varchar(256) DEFAULT NULL `FIELD_FORMAT`='address.coord',
- `address_street` char(38) NOT NULL `FIELD_FORMAT`='address.street',
- `address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
`borough` char(13) NOT NULL,
`cuisine` char(64) NOT NULL,
- `grades_date` varchar(256) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
- `grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
+ `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024
@@ -251,15 +251,15 @@ OPTION_LIST='Driver=C,level=2,version=0';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `_id` char(24) NOT NULL `FIELD_FORMAT`='_id',
- `address_building` char(10) NOT NULL `FIELD_FORMAT`='address.building',
- `address_coord` double(23,20) DEFAULT NULL `FIELD_FORMAT`='address.coord.0',
- `address_street` char(38) NOT NULL `FIELD_FORMAT`='address.street',
- `address_zipcode` char(5) NOT NULL `FIELD_FORMAT`='address.zipcode',
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` double(23,20) DEFAULT NULL `JPATH`='address.coord.0',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
`borough` char(13) NOT NULL,
- `grades_date` bigint(13) DEFAULT NULL `FIELD_FORMAT`='grades.0.date',
- `grades_grade` char(14) DEFAULT NULL `FIELD_FORMAT`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `FIELD_FORMAT`='grades.0.score',
+ `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"projection":{"cuisine":0}}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=C,level=2,version=0' `LRECL`=1024
diff --git a/storage/connect/mysql-test/connect/r/updelx.result b/storage/connect/mysql-test/connect/r/updelx.result
index 2aed1e06928..bb82afcc1a8 100644
--- a/storage/connect/mysql-test/connect/r/updelx.result
+++ b/storage/connect/mysql-test/connect/r/updelx.result
@@ -978,7 +978,7 @@ DROP TABLE t1;
# FIX table
CREATE TABLE t1 (
id INT(4) KEY NOT NULL,
-msg VARCHAR(16) CHARSET BINARY DISTRIB=CLUSTERED)
+msg VARCHAR(16) DISTRIB=CLUSTERED)
ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=4;
Warnings:
Warning 1105 No file name. Table will use t1.fix
@@ -1345,7 +1345,7 @@ DROP TABLE t1;
# BIN table
CREATE TABLE t1 (
id INT(4) KEY NOT NULL,
-msg VARCHAR(16) CHARSET BINARY DISTRIB=CLUSTERED)
+msg VARCHAR(16) DISTRIB=CLUSTERED)
ENGINE=CONNECT TABLE_TYPE=BIN BLOCK_SIZE=8;
Warnings:
Warning 1105 No file name. Table will use t1.bin
diff --git a/storage/connect/mysql-test/connect/t/updelx.test b/storage/connect/mysql-test/connect/t/updelx.test
index 19d0d790a30..f6291432e48 100644
--- a/storage/connect/mysql-test/connect/t/updelx.test
+++ b/storage/connect/mysql-test/connect/t/updelx.test
@@ -36,7 +36,7 @@ DROP TABLE t1;
--echo # FIX table
CREATE TABLE t1 (
id INT(4) KEY NOT NULL,
-msg VARCHAR(16) CHARSET BINARY DISTRIB=CLUSTERED)
+msg VARCHAR(16) DISTRIB=CLUSTERED)
ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=4;
-- source updelx.inc
ALTER TABLE t1 MAPPED=YES;
@@ -48,7 +48,7 @@ DROP TABLE t1;
--echo # BIN table
CREATE TABLE t1 (
id INT(4) KEY NOT NULL,
-msg VARCHAR(16) CHARSET BINARY DISTRIB=CLUSTERED)
+msg VARCHAR(16) DISTRIB=CLUSTERED)
ENGINE=CONNECT TABLE_TYPE=BIN BLOCK_SIZE=8;
-- source updelx.inc
ALTER TABLE t1 MAPPED=YES;
diff --git a/storage/connect/myutil.h b/storage/connect/myutil.h
index 6991172b39e..fa41fa47d61 100644
--- a/storage/connect/myutil.h
+++ b/storage/connect/myutil.h
@@ -6,8 +6,8 @@
enum enum_field_types PLGtoMYSQL(int type, bool dbf, char var = 0);
const char *PLGtoMYSQLtype(int type, bool dbf, char var = 0);
-int MYSQLtoPLG(char *typname, char *var = NULL);
-int MYSQLtoPLG(int mytype, char *var = NULL);
+int MYSQLtoPLG(char *typname, char *var);
+int MYSQLtoPLG(int mytype, char *var);
PCSZ MyDateFmt(int mytype);
PCSZ MyDateFmt(char *typname);
diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp
index d51d52a2678..90d3cd00d5e 100644
--- a/storage/connect/plgdbutl.cpp
+++ b/storage/connect/plgdbutl.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2018 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -1244,7 +1244,7 @@ void *PlgDBalloc(PGLOBAL g, void *area, MBLOCK& mp)
mp.Sub = mp.Size <= ((mp.Sub) ? maxsub : (maxsub >> 2));
if (trace(2))
- htrc("PlgDBalloc: in %p size=%d used=%d free=%d sub=%d\n",
+ htrc("PlgDBalloc: in %p size=%zd used=%zd free=%zd sub=%d\n",
arp, mp.Size, pph->To_Free, pph->FreeBlk, mp.Sub);
if (!mp.Sub) {
@@ -1260,7 +1260,7 @@ void *PlgDBalloc(PGLOBAL g, void *area, MBLOCK& mp)
mp.Memp = malloc(mp.Size);
if (trace(8))
- htrc("PlgDBalloc: %s(%d) at %p\n", v, mp.Size, mp.Memp);
+ htrc("PlgDBalloc: %s(%zd) at %p\n", v, mp.Size, mp.Memp);
if (!mp.Inlist && mp.Memp) {
// New allocated block, put it in the memory block chain.
@@ -1292,7 +1292,7 @@ void *PlgDBrealloc(PGLOBAL g, void *area, MBLOCK& mp, size_t newsize)
#endif
if (trace(2))
- htrc("PlgDBrealloc: %p size=%d sub=%d\n", mp.Memp, mp.Size, mp.Sub);
+ htrc("PlgDBrealloc: %p size=%zd sub=%d\n", mp.Memp, mp.Size, mp.Sub);
if (newsize == mp.Size)
return mp.Memp; // Nothing to do
@@ -1342,7 +1342,7 @@ void *PlgDBrealloc(PGLOBAL g, void *area, MBLOCK& mp, size_t newsize)
} // endif's
if (trace(8))
- htrc(" newsize=%d newp=%p sub=%d\n", mp.Size, mp.Memp, mp.Sub);
+ htrc(" newsize=%zd newp=%p sub=%d\n", mp.Size, mp.Memp, mp.Sub);
return mp.Memp;
} // end of PlgDBrealloc
@@ -1394,13 +1394,13 @@ void *PlgDBSubAlloc(PGLOBAL g, void *memp, size_t size)
pph = (PPOOLHEADER)memp;
if (trace(16))
- htrc("PlgDBSubAlloc: memp=%p size=%d used=%d free=%d\n",
+ htrc("PlgDBSubAlloc: memp=%p size=%zd used=%zd free=%zd\n",
memp, size, pph->To_Free, pph->FreeBlk);
- if ((uint)size > pph->FreeBlk) { /* Not enough memory left in pool */
+ if (size > pph->FreeBlk) { /* Not enough memory left in pool */
sprintf(g->Message,
- "Not enough memory in Work area for request of %d (used=%d free=%d)",
- (int) size, pph->To_Free, pph->FreeBlk);
+ "Not enough memory in Work area for request of %zd (used=%zd free=%zd)",
+ size, pph->To_Free, pph->FreeBlk);
if (trace(1))
htrc("%s\n", g->Message);
@@ -1416,7 +1416,7 @@ void *PlgDBSubAlloc(PGLOBAL g, void *memp, size_t size)
pph->FreeBlk -= size; // New size of pool free block
if (trace(16))
- htrc("Done memp=%p used=%d free=%d\n",
+ htrc("Done memp=%p used=%zd free=%zd\n",
memp, pph->To_Free, pph->FreeBlk);
return (memp);
diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp
index 3899379ade2..e45feb31bea 100644
--- a/storage/connect/plugutil.cpp
+++ b/storage/connect/plugutil.cpp
@@ -6,7 +6,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1993-2019 */
+/* (C) Copyright to the author Olivier BERTRAND 1993-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -142,7 +142,7 @@ void htrc(char const* fmt, ...)
/* Language points on initial language name and eventual path. */
/* Return value is the pointer to the Global structure. */
/***********************************************************************/
-PGLOBAL PlugInit(LPCSTR Language, uint worksize)
+PGLOBAL PlugInit(LPCSTR Language, size_t worksize)
{
PGLOBAL g;
@@ -158,13 +158,14 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize)
} // end try/catch
g->Sarea = NULL;
- g->Createas = 0;
+ g->Createas = false;
g->Alchecked = 0;
g->Mrr = 0;
g->Activityp = NULL;
g->Xchk = NULL;
g->N = 0;
g->More = 0;
+ g->Saved_Size = 0;
strcpy(g->Message, "");
/*******************************************************************/
@@ -459,7 +460,7 @@ short GetLineLength(PGLOBAL g)
/***********************************************************************/
/* Program for memory allocation of work and language areas. */
/***********************************************************************/
-bool AllocSarea(PGLOBAL g, uint size)
+bool AllocSarea(PGLOBAL g, size_t size)
{
/*********************************************************************/
/* This is the allocation routine for the WIN32/UNIX/AIX version. */
@@ -483,7 +484,7 @@ bool AllocSarea(PGLOBAL g, uint size)
if (trace(8)) {
#endif
if (g->Sarea)
- htrc("Work area of %u allocated at %p\n", size, g->Sarea);
+ htrc("Work area of %zd allocated at %p\n", size, g->Sarea);
else
htrc("SareaAlloc: %-.256s\n", g->Message);
@@ -510,7 +511,7 @@ void FreeSarea(PGLOBAL g)
#else
if (trace(8))
#endif
- htrc("Freeing Sarea at %p size = %d\n", g->Sarea, g->Sarea_Size);
+ htrc("Freeing Sarea at %p size = %zd\n", g->Sarea, g->Sarea_Size);
g->Sarea = NULL;
g->Sarea_Size = 0;
@@ -524,11 +525,11 @@ void FreeSarea(PGLOBAL g)
/* Here there should be some verification done such as validity of */
/* the address and size not larger than memory size. */
/***********************************************************************/
-BOOL PlugSubSet(void *memp, uint size)
+BOOL PlugSubSet(void *memp, size_t size)
{
PPOOLHEADER pph = (PPOOLHEADER)memp;
- pph->To_Free = (OFFSET)sizeof(POOLHEADER);
+ pph->To_Free = (size_t)sizeof(POOLHEADER);
pph->FreeBlk = size - pph->To_Free;
return FALSE;
} /* end of PlugSubSet */
@@ -560,15 +561,15 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
pph = (PPOOLHEADER)memp;
if (trace(16))
- htrc("SubAlloc in %p size=%d used=%d free=%d\n",
+ htrc("SubAlloc in %p size=%zd used=%zd free=%zd\n",
memp, size, pph->To_Free, pph->FreeBlk);
- if ((uint)size > pph->FreeBlk) { /* Not enough memory left in pool */
+ if (size > pph->FreeBlk) { /* Not enough memory left in pool */
PCSZ pname = "Work";
sprintf(g->Message,
- "Not enough memory in %-.256s area for request of %u (used=%d free=%d)",
- pname, (uint)size, pph->To_Free, pph->FreeBlk);
+ "Not enough memory in %-.256s area for request of %zu (used=%zu free=%zu)",
+ pname, size, pph->To_Free, pph->FreeBlk);
if (trace(1))
htrc("PlugSubAlloc: %-.256s\n", g->Message);
@@ -580,11 +581,11 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
/* Do the suballocation the simplest way. */
/*********************************************************************/
memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */
- pph->To_Free += (OFFSET)size; /* New offset of pool free block */
- pph->FreeBlk -= (uint)size; /* New size of pool free block */
+ pph->To_Free += size; /* New offset of pool free block */
+ pph->FreeBlk -= size; /* New size of pool free block */
if (trace(16))
- htrc("Done memp=%p used=%d free=%d\n",
+ htrc("Done memp=%p used=%zd free=%zd\n",
memp, pph->To_Free, pph->FreeBlk);
return (memp);
@@ -605,40 +606,4 @@ char *PlugDup(PGLOBAL g, const char *str)
} // end of PlugDup
-#if 0
-/***********************************************************************/
-/* This routine suballocate a copy of the passed string. */
-/***********************************************************************/
-char *PlugDup(PGLOBAL g, const char *str)
- {
- char *buf;
- size_t len;
-
- if (str && (len = strlen(str))) {
- buf = (char*)PlugSubAlloc(g, NULL, len + 1);
- strcpy(buf, str);
- } else
- buf = NULL;
-
- return(buf);
- } /* end of PlugDup */
-#endif // 0
-
-/***********************************************************************/
-/* This routine makes a pointer from an offset to a memory pointer. */
-/***********************************************************************/
-void *MakePtr(void *memp, OFFSET offset)
- {
- return ((offset == 0) ? NULL : &((char *)memp)[offset]);
- } /* end of MakePtr */
-
-/***********************************************************************/
-/* This routine makes an offset from a pointer new format. */
-/***********************************************************************/
-#if 0
-OFFSET MakeOff(void *memp, void *ptr)
- {
- return ((!ptr) ? 0 : (OFFSET)((char *)ptr - (char *)memp));
- } /* end of MakeOff */
-#endif
/*--------------------- End of PLUGUTIL program -----------------------*/
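As background for the size_t conversions above, here is a minimal self-contained sketch of the bump-pointer suballocation that PlugSubSet()/PlugSubAlloc() implement over the work area. The names and the 8-byte rounding are illustrative assumptions; the real routines additionally format g->Message and emit trace output, as shown in the hunks above.

#include <cstddef>

struct PoolHeader {           // mirrors POOLHEADER: offsets kept as size_t now
  size_t to_free;             // offset of the first free byte
  size_t free_blk;            // bytes still available in the pool
};

// Prepare a raw work area of `size` bytes for suballocation (cf. PlugSubSet).
static void pool_set(void *memp, size_t size)
{
  PoolHeader *pph = static_cast<PoolHeader*>(memp);
  pph->to_free = sizeof(PoolHeader);
  pph->free_blk = size - pph->to_free;
}

// Carve `size` bytes off the free block (cf. PlugSubAlloc); NULL when exhausted.
static void *pool_alloc(void *memp, size_t size)
{
  size = ((size + 7) / 8) * 8;                       // assumed alignment rounding
  PoolHeader *pph = static_cast<PoolHeader*>(memp);
  if (size > pph->free_blk)
    return NULL;                                     // pool exhausted
  void *p = static_cast<char*>(memp) + pph->to_free; // cf. MakePtr()
  pph->to_free += size;
  pph->free_blk -= size;
  return p;
}
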
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index 692ca9d0258..dbcd590c3de 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -52,19 +52,10 @@
/* External functions. */
/***********************************************************************/
USETEMP UseTemp(void);
+bool JsonAllPath(void);
+int GetDefaultDepth(void);
char *GetJsonNull(void);
-//typedef struct _jncol {
-// struct _jncol *Next;
-// char *Name;
-// char *Fmt;
-// int Type;
-// int Len;
-// int Scale;
-// bool Cbn;
-// bool Found;
-//} JCOL, *PJCL;
-
/***********************************************************************/
/* JSONColumns: construct the result blocks containing the description */
/* of all the columns of a table contained inside a JSON file. */
@@ -167,23 +158,20 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg)
jsp = NULL;
row = NULL;
sep = NULL;
- i = n = bf = ncol = lvl = 0;
- all = false;
+ i = n = bf = ncol = lvl = sz = 0;
+ all = strfy = false;
} // end of JSONDISC constructor
int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
{
char filename[_MAX_PATH];
bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
- PCSZ level = GetStringTableOption(g, topt, "Level", NULL);
-
- if (level) {
- lvl = atoi(level);
- lvl = (lvl > 16) ? 16 : lvl;
- } else
- lvl = 0;
- sep = GetStringTableOption(g, topt, "Separator", ".");
+ lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
+ lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
+ sep = GetStringTableOption(g, topt, "Separator", ".");
+ sz = GetIntegerTableOption(g, topt, "Jsize", 1024);
+ strfy = GetBooleanTableOption(g, topt, "Stringify", false);
/*********************************************************************/
/* Open the input file. */
@@ -306,7 +294,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
// Allocate the parse work memory
PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL));
memset(G, 0, sizeof(GLOBAL));
- G->Sarea_Size = tdp->Lrecl * 10;
+ G->Sarea_Size = (size_t)tdp->Lrecl * 10;
G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size);
PlugSubSet(G->Sarea, G->Sarea_Size);
G->jump_level = 0;
@@ -403,7 +391,10 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
PJAR jar;
if ((valp = jvp ? jvp->GetValue() : NULL)) {
- jcol.Type = valp->GetType();
+ if (JsonAllPath() && !fmt[bf])
+ strcat(fmt, colname);
+
+ jcol.Type = valp->GetType();
jcol.Len = valp->GetValLen();
jcol.Scale = valp->GetValPrec();
jcol.Cbn = valp->IsNull();
@@ -482,8 +473,16 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
} // endswitch Type
} else if (lvl >= 0) {
- jcol.Type = TYPE_STRING;
- jcol.Len = 256;
+ if (strfy) {
+ if (!fmt[bf])
+ strcat(fmt, colname);
+
+ strcat(fmt, ".*");
+ } else if (JsonAllPath() && !fmt[bf])
+ strcat(fmt, colname);
+
+ jcol.Type = TYPE_STRING;
+ jcol.Len = sz;
jcol.Scale = 0;
jcol.Cbn = true;
} else
@@ -1489,7 +1488,18 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
if (Value->IsTypeNum()) {
strcpy(g->Message, "Cannot make Json for a numeric column");
Value->Reset();
- } else
+ } else if (Value->GetType() == TYPE_BIN) {
+ if ((unsigned)Value->GetClen() >= sizeof(BSON)) {
+ ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500;
+ PBSON bsp = JbinAlloc(g, NULL, len, jsp);
+
+ strcat(bsp->Msg, " column");
+ ((BINVAL*)Value)->SetBinValue(bsp, sizeof(BSON));
+ } else {
+ strcpy(g->Message, "Column size too small");
+ Value->SetValue_char(NULL, 0);
+ } // endif Clen
+ } else
Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
return Value;
@@ -1985,8 +1995,9 @@ int TDBJSON::MakeNewDoc(PGLOBAL g)
/***********************************************************************/
int TDBJSON::MakeDocument(PGLOBAL g)
{
- char *p, *memory, *objpath, *key = NULL;
+ char *p, *p1, *p2, *memory, *objpath, *key = NULL;
int len, i = 0;
+ my_bool a;
MODE mode = Mode;
PJSON jsp;
PJOB objp = NULL;
@@ -2029,22 +2040,39 @@ int TDBJSON::MakeDocument(PGLOBAL g)
if ((objpath = PlugDup(g, Objname))) {
if (*objpath == '$') objpath++;
if (*objpath == '.') objpath++;
+ p1 = (*objpath == '[') ? objpath++ : NULL;
/*********************************************************************/
/* Find the table in the tree structure. */
/*********************************************************************/
- for (; jsp && objpath; objpath = p) {
- if ((p = strchr(objpath, Sep)))
- *p++ = 0;
-
- if (*objpath != '[' && !IsNum(objpath)) {
- // objpass is a key
+ for (p = objpath; jsp && p; p = (p2 ? p2 : NULL)) {
+ a = (p1 != NULL);
+ p1 = strchr(p, '[');
+ p2 = strchr(p, '.');
+
+ if (!p2)
+ p2 = p1;
+ else if (p1) {
+ if (p1 < p2)
+ p2 = p1;
+ else if (p1 == p2 + 1)
+ *p2++ = 0; // Old syntax .[
+ else
+ p1 = NULL;
+
+ } // endif p1
+
+ if (p2)
+ *p2++ = 0;
+
+ if (!a && *p && *p != '[' && !IsNum(p)) {
+ // obj is a key
if (jsp->GetType() != TYPE_JOB) {
strcpy(g->Message, "Table path does not match the json file");
return RC_FX;
} // endif Type
- key = objpath;
+ key = p;
objp = jsp->GetObject();
arp = NULL;
val = objp->GetValue(key);
@@ -2055,15 +2083,15 @@ int TDBJSON::MakeDocument(PGLOBAL g)
} // endif val
} else {
- if (*objpath == '[') {
+ if (*p == '[') {
// Old style
- if (objpath[strlen(objpath) - 1] != ']') {
- sprintf(g->Message, "Invalid Table path %s", Objname);
+ if (p[strlen(p) - 1] != ']') {
+ sprintf(g->Message, "Invalid Table path near %s", p);
return RC_FX;
} else
- objpath++;
+ p++;
- } // endif objpath
+ } // endif p
if (jsp->GetType() != TYPE_JAR) {
strcpy(g->Message, "Table path does not match the json file");
@@ -2072,7 +2100,7 @@ int TDBJSON::MakeDocument(PGLOBAL g)
arp = jsp->GetArray();
objp = NULL;
- i = atoi(objpath) - B;
+ i = atoi(p) - B;
val = arp->GetValue(i);
if (!val) {
@@ -2083,7 +2111,7 @@ int TDBJSON::MakeDocument(PGLOBAL g)
} // endif
jsp = val->GetJson();
- } // endfor objpath
+ } // endfor p
} // endif objpath
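For illustration only (the table path names are hypothetical): assuming the parsing loop above behaves as written, the following Objname spellings now tokenize as shown, with the old ".[" syntax still accepted.

    store[1].book   ->  key "store", array element 1, key "book"
    store.[1].book  ->  key "store", array element 1, key "book"   (old syntax)
    store.book.1    ->  key "store", key "book", array element 1   (numeric segment)
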
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
index 8c3f1013919..88aa5e2ee8b 100644
--- a/storage/connect/tabjson.h
+++ b/storage/connect/tabjson.h
@@ -68,8 +68,8 @@ public:
PCSZ sep;
char colname[65], fmt[129], buf[16];
uint *length;
- int i, n, bf, ncol, lvl;
- bool all;
+ int i, n, bf, ncol, lvl, sz;
+ bool all, strfy;
}; // end of JSONDISC
/***********************************************************************/
diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp
index 3ef2a460b9d..b1bdeffc880 100644
--- a/storage/connect/tabrest.cpp
+++ b/storage/connect/tabrest.cpp
@@ -158,16 +158,32 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
http = GetStringTableOption(g, tp, "Http", NULL);
uri = GetStringTableOption(g, tp, "Uri", NULL);
- fn = GetStringTableOption(g, tp, "Filename", "rest.json");
#if defined(MARIADB)
ftype = GetStringTableOption(g, tp, "Type", "JSON");
#else // !MARIADB
// OEM tables must specify the file type
ftype = GetStringTableOption(g, tp, "Ftype", "JSON");
#endif // !MARIADB
+ fn = GetStringTableOption(g, tp, "Filename", NULL);
+
+ if (!fn) {
+ int n, m = strlen(ftype) + 1;
+
+ strcat(strcpy(filename, tab), ".");
+ n = strlen(filename);
+
+ // Fold ftype to lower case
+ for (int i = 0; i < m; i++)
+ filename[n + i] = tolower(ftype[i]);
+
+ fn = filename;
+ tp->filename = PlugDup(g, fn);
+ } // endif fn
// We used the file name relative to recorded datapath
- snprintf(filename, sizeof filename, IF_WIN(".\\%s\\%s","./%s/%s"), db, fn);
+ PlugSetPath(filename, fn, db);
+ //strcat(strcat(strcat(strcpy(filename, "."), slash), db), slash);
+ //strncat(filename, fn, _MAX_PATH - strlen(filename));
// Retrieve the file from the web and copy it locally
if (http && grf(g->Message, trace(515), http, uri, filename)) {
@@ -226,12 +242,10 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
Http = GetStringCatInfo(g, "Http", NULL);
Uri = GetStringCatInfo(g, "Uri", NULL);
- Fn = GetStringCatInfo(g, "Filename", "rest.json");
+ Fn = GetStringCatInfo(g, "Filename", NULL);
// We used the file name relative to recorded datapath
- //PlugSetPath(filename, Fn, GetPath());
- strcpy(filename, GetPath());
- strncat(filename, Fn, _MAX_PATH - strlen(filename));
+ PlugSetPath(filename, Fn, GetPath());
// Retrieve the file from the web and copy it locally
rc = grf(g->Message, xt, Http, Uri, filename);
@@ -269,7 +283,7 @@ PTDB RESTDEF::GetTable(PGLOBAL g, MODE m)
if (trace(515))
htrc("REST GetTable mode=%d\n", m);
- if (m != MODE_READ && m != MODE_READX) {
+ if (m != MODE_READ && m != MODE_READX && m != MODE_ANY) {
strcpy(g->Message, "REST tables are currently read only");
return NULL;
} // endif m
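Illustrative summary of the new Filename default (table and type names below are hypothetical): when the option is omitted, the local copy is now named after the table and its lower-cased type instead of the former fixed rest.json, and the result is resolved against the recorded datapath by PlugSetPath().

    Filename='foo.json' given   ->  foo.json under the datapath
    Filename omitted, Type=XML  ->  <table>.xml, e.g. table t2 fetches into t2.xml
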
diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp
index 10af70db990..e82e9b6823e 100644
--- a/storage/connect/tabxml.cpp
+++ b/storage/connect/tabxml.cpp
@@ -3,7 +3,7 @@
/* ------------- */
/* Version 3.0 */
/* */
-/* Author Olivier BERTRAND 2007 - 2017 */
+/* Author Olivier BERTRAND 2007 - 2020 */
/* */
/* This program are the XML tables classes using MS-DOM or libxml2. */
/***********************************************************************/
@@ -62,6 +62,8 @@ extern "C" char version[];
#define TYPE_UNKNOWN 12 /* Must be greater than other types */
#define XLEN(M) sizeof(M) - strlen(M) - 1 /* To avoid overflow*/
+int GetDefaultDepth(void);
+
/***********************************************************************/
/* Class and structure used by XMLColumns. */
/***********************************************************************/
@@ -149,8 +151,9 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
strcpy(g->Message, MSG(MISSING_FNAME));
return NULL;
} else {
- lvl = GetIntegerTableOption(g, topt, "Level", 0);
- lvl = (lvl < 0) ? 0 : (lvl > 16) ? 16 : lvl;
+ lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
+ lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
+ lvl = (lvl < 0) ? 0 : (lvl > 16) ? 16 : lvl;
} // endif fn
if (trace(1))
diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc
index 55597c426eb..c8f38b68015 100644
--- a/storage/connect/user_connect.cc
+++ b/storage/connect/user_connect.cc
@@ -28,7 +28,7 @@
*/
/****************************************************************************/
-/* Author: Olivier Bertrand -- bertrandop@gmail.com -- 2004-2015 */
+/* Author: Olivier Bertrand -- bertrandop@gmail.com -- 2004-2020 */
/****************************************************************************/
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation // gcc: Class implementation
@@ -58,8 +58,8 @@ PCONNECT user_connect::to_users= NULL;
/****************************************************************************/
/* Get the work_size SESSION variable value . */
/****************************************************************************/
-uint GetWorkSize(void);
-void SetWorkSize(uint);
+size_t GetWorkSize(void);
+void SetWorkSize(size_t);
/* -------------------------- class user_connect -------------------------- */
@@ -97,14 +97,14 @@ user_connect::~user_connect()
bool user_connect::user_init()
{
// Initialize Plug-like environment
- uint worksize= GetWorkSize();
+ size_t worksize= GetWorkSize();
PACTIVITY ap= NULL;
PDBUSER dup= NULL;
// Areasize= 64M because of VEC tables. Should be parameterisable
//g= PlugInit(NULL, 67108864);
//g= PlugInit(NULL, 134217728); // 128M was because of old embedded tests
- g= PlugInit(NULL, worksize);
+ g= PlugInit(NULL, (size_t)worksize);
// Check whether the initialization is complete
if (!g || !g->Sarea || PlugSubSet(g->Sarea, g->Sarea_Size)
@@ -157,16 +157,17 @@ void user_connect::SetHandler(ha_connect *hc)
bool user_connect::CheckCleanup(bool force)
{
if (thdp->query_id > last_query_id || force) {
- uint worksize= GetWorkSize(), size = g->Sarea_Size;
+ size_t worksize = GetWorkSize();
PlugCleanup(g, true);
- if (size != worksize) {
+ if (worksize != g->Sarea_Size) {
FreeSarea(g);
+ g->Saved_Size = g->Sarea_Size;
// Check whether the work area could be allocated
if (AllocSarea(g, worksize)) {
- AllocSarea(g, size);
+ AllocSarea(g, g->Saved_Size);
SetWorkSize(g->Sarea_Size); // Was too big
} // endif sarea
@@ -174,10 +175,11 @@ bool user_connect::CheckCleanup(bool force)
PlugSubSet(g->Sarea, g->Sarea_Size);
g->Xchk = NULL;
- g->Createas = 0;
+ g->Createas = false;
g->Alchecked = 0;
g->Mrr = 0;
g->More = 0;
+ g->Saved_Size = 0;
last_query_id= thdp->query_id;
if (trace(65) && !force)
diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp
index 2fad67f453b..5951b26e81e 100644
--- a/storage/connect/value.cpp
+++ b/storage/connect/value.cpp
@@ -2251,6 +2251,15 @@ void BINVAL::SetBinValue(void *p)
} // end of SetBinValue
/***********************************************************************/
+/* BINVAL SetBinValue: fill the binary buffer with len bytes.           */
+/***********************************************************************/
+void BINVAL::SetBinValue(void* p, ulong len)
+{
+ memcpy(Binp, p, len);
+ Len = len;
+} // end of SetBinValue
+
+/***********************************************************************/
/* GetBinValue: fill a buffer with the internal binary value. */
/* This function checks whether the buffer length is enough and */
/* returns true if not. Actual filling occurs only if go is true. */
diff --git a/storage/connect/value.h b/storage/connect/value.h
index 4f7d9a440fa..ee7a1c8032f 100644
--- a/storage/connect/value.h
+++ b/storage/connect/value.h
@@ -115,8 +115,8 @@ class DllExport VALUE : public BLOCK {
virtual void SetValue(ulonglong) {assert(false);}
virtual void SetValue(double) {assert(false);}
virtual void SetValue_pvblk(PVBLK blk, int n) = 0;
- virtual void SetBinValue(void *p) = 0;
- virtual bool GetBinValue(void *buf, int buflen, bool go) = 0;
+ virtual void SetBinValue(void* p) = 0;
+ virtual bool GetBinValue(void *buf, int buflen, bool go) = 0;
virtual int ShowValue(char *buf, int len) = 0;
virtual char *GetCharString(char *p) = 0;
virtual bool IsEqual(PVAL vp, bool chktype) = 0;
@@ -385,7 +385,8 @@ class DllExport BINVAL: public VALUE {
virtual void SetValue(ulonglong n);
virtual void SetValue(double f);
virtual void SetBinValue(void *p);
- virtual bool GetBinValue(void *buf, int buflen, bool go);
+ virtual void SetBinValue(void* p, ulong len);
+ virtual bool GetBinValue(void *buf, int buflen, bool go);
virtual int CompareValue(PVAL) {assert(false); return 0;}
virtual int ShowValue(char *buf, int len);
virtual char *GetCharString(char *p);
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index 80fc90eb84b..ff5bb4b3ec4 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -26,12 +26,8 @@
#include "ha_heap.h"
#include "sql_base.h" // enum_tdc_remove_table_type
-static handler *heap_create_handler(handlerton *hton,
- TABLE_SHARE *table,
- MEM_ROOT *mem_root);
-static int
-heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table,
- HP_CREATE_INFO *hp_create_info);
+static handler *heap_create_handler(handlerton *, TABLE_SHARE *, MEM_ROOT *);
+static int heap_prepare_hp_create_info(TABLE *, bool, HP_CREATE_INFO *);
int heap_panic(handlerton *hton, ha_panic_function flag)
@@ -368,7 +364,7 @@ int ha_heap::info(uint flag)
HEAPINFO hp_info;
if (!table)
- return 1;
+ return 0;
(void) heap_info(file,&hp_info,flag);
@@ -603,16 +599,15 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
}
-static int
-heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table,
- HP_CREATE_INFO *hp_create_info)
+static int heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table,
+ HP_CREATE_INFO *hp_create_info)
{
- uint key, parts, mem_per_row= 0, keys= table_arg->s->keys;
+ TABLE_SHARE *share= table_arg->s;
+ uint key, parts, mem_per_row= 0, keys= share->keys;
uint auto_key= 0, auto_key_type= 0;
ha_rows max_rows;
HP_KEYDEF *keydef;
HA_KEYSEG *seg;
- TABLE_SHARE *share= table_arg->s;
bool found_real_auto_increment= 0;
bzero(hp_create_info, sizeof(*hp_create_info));
@@ -620,11 +615,11 @@ heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table,
for (key= parts= 0; key < keys; key++)
parts+= table_arg->key_info[key].user_defined_key_parts;
- if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) +
- parts * sizeof(HA_KEYSEG),
- MYF(MY_WME | MY_THREAD_SPECIFIC))))
+ if (!my_multi_malloc(MYF(MY_WME | MY_THREAD_SPECIFIC),
+ &keydef, keys * sizeof(HP_KEYDEF),
+ &seg, parts * sizeof(HA_KEYSEG),
+ NULL))
return my_errno;
- seg= reinterpret_cast<HA_KEYSEG*>(keydef + keys);
for (key= 0; key < keys; key++)
{
KEY *pos= table_arg->key_info+key;
diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt
index cbd280af223..a1c54d10776 100644
--- a/storage/innobase/CMakeLists.txt
+++ b/storage/innobase/CMakeLists.txt
@@ -28,6 +28,7 @@ SET(INNOBASE_SOURCES
btr/btr0scrub.cc
btr/btr0sea.cc
btr/btr0defragment.cc
+ buf/buf0block_hint.cc
buf/buf0buddy.cc
buf/buf0buf.cc
buf/buf0dblwr.cc
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 04aaccec84e..8ee7d167805 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -283,7 +283,7 @@ the index.
ulint
btr_height_get(
/*===========*/
- dict_index_t* index, /*!< in: index tree */
+ const dict_index_t* index, /*!< in: index tree */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint height=0;
@@ -602,7 +602,7 @@ Gets the number of pages in a B-tree.
ulint
btr_get_size(
/*=========*/
- dict_index_t* index, /*!< in: index */
+ const dict_index_t* index, /*!< in: index */
ulint flag, /*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
mtr_t* mtr) /*!< in/out: mini-transaction where index
is s-latched */
diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc
index d998463ecf4..51c91c5b037 100644
--- a/storage/innobase/btr/btr0bulk.cc
+++ b/storage/innobase/btr/btr0bulk.cc
@@ -699,6 +699,8 @@ PageBulk::latch()
m_index->set_modified(m_mtr);
}
+ ut_ad(m_block->page.buf_fix_count);
+
/* In case the block is S-latched by page_cleaner. */
if (!buf_page_optimistic_get(RW_X_LATCH, m_block, m_modify_clock,
__FILE__, __LINE__, &m_mtr)) {
@@ -717,6 +719,8 @@ PageBulk::latch()
buf_block_buf_fix_dec(m_block);
+ ut_ad(m_block->page.buf_fix_count);
+
ut_ad(m_cur_rec > m_page && m_cur_rec < m_heap_top);
return (m_err);
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 0cd65ad0a4c..59c6d06d5af 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -776,6 +776,8 @@ btr_cur_optimistic_latch_leaves(
ulint mode;
ulint left_page_no;
ulint curr_page_no;
+ ut_ad(block->page.buf_fix_count);
+ ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
switch (*latch_mode) {
case BTR_SEARCH_LEAF:
@@ -787,20 +789,10 @@ btr_cur_optimistic_latch_leaves(
mode = *latch_mode == BTR_SEARCH_PREV
? RW_S_LATCH : RW_X_LATCH;
- buf_page_mutex_enter(block);
- if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
- buf_page_mutex_exit(block);
- return(false);
- }
- /* pin the block not to be relocated */
- buf_block_buf_fix_inc(block, file, line);
- buf_page_mutex_exit(block);
-
rw_lock_s_lock(&block->lock);
if (block->modify_clock != modify_clock) {
rw_lock_s_unlock(&block->lock);
-
- goto unpin_failed;
+ return false;
}
curr_page_no = block->page.id.page_no();
@@ -826,7 +818,7 @@ btr_cur_optimistic_latch_leaves(
/* release the left block */
btr_leaf_page_release(
cursor->left_block, mode, mtr);
- goto unpin_failed;
+ return false;
}
} else {
cursor->left_block = NULL;
@@ -836,23 +828,28 @@ btr_cur_optimistic_latch_leaves(
file, line, mtr)) {
if (btr_page_get_prev(buf_block_get_frame(block))
== left_page_no) {
- buf_block_buf_fix_dec(block);
+ /* block was already buffer-fixed while
+ entering the function and
+ buf_page_optimistic_get() buffer-fixes
+ it again. */
+ ut_ad(2 <= block->page.buf_fix_count);
*latch_mode = mode;
return(true);
} else {
- /* release the block */
+				/* release the block and decrement the
+				buf_fix_count which was incremented
+				in buf_page_optimistic_get() */
btr_leaf_page_release(block, mode, mtr);
}
}
+ ut_ad(block->page.buf_fix_count);
/* release the left block */
if (cursor->left_block != NULL) {
btr_leaf_page_release(cursor->left_block,
mode, mtr);
}
-unpin_failed:
- /* unpin the block */
- buf_block_buf_fix_dec(block);
+
return(false);
default:
@@ -1416,12 +1413,7 @@ btr_cur_search_to_nth_level_func(
guess = NULL;
#else
info = btr_search_get_info(index);
-
- if (!buf_pool_is_obsolete(info->withdraw_clock)) {
- guess = info->root_guess;
- } else {
- guess = NULL;
- }
+ guess = info->root_guess;
#ifdef BTR_CUR_HASH_ADAPT
@@ -1851,10 +1843,7 @@ retry_page_get:
}
#ifdef BTR_CUR_ADAPT
- if (block != guess) {
- info->root_guess = block;
- info->withdraw_clock = buf_withdraw_clock;
- }
+ info->root_guess = block;
#endif
}
@@ -6847,21 +6836,19 @@ btr_record_not_null_field_in_rec(
}
}
-/*******************************************************************//**
-Estimates the number of different key values in a given index, for
+/** Estimates the number of different key values in a given index, for
each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index).
The estimates are stored in the array index->stat_n_diff_key_vals[] (indexed
0..n_uniq-1) and the number of pages that were sampled is saved in
-index->stat_n_sample_sizes[].
+result.n_sample_sizes[].
If innodb_stats_method is nulls_ignored, we also record the number of
non-null values for each prefix and stored the estimates in
-array index->stat_n_non_null_key_vals.
-@return true if the index is available and we get the estimated numbers,
-false if the index is unavailable. */
-bool
-btr_estimate_number_of_different_key_vals(
-/*======================================*/
- dict_index_t* index) /*!< in: index */
+array result.n_non_null_key_vals.
+@param[in] index index
+@return vector with statistics information; an
+empty vector if the index is unavailable. */
+std::vector<index_field_stats_t>
+btr_estimate_number_of_different_key_vals(dict_index_t* index)
{
btr_cur_t cursor;
page_t* page;
@@ -6881,11 +6868,11 @@ btr_estimate_number_of_different_key_vals(
rec_offs* offsets_rec = NULL;
rec_offs* offsets_next_rec = NULL;
+ std::vector<index_field_stats_t> result;
+
/* For spatial index, there is no such stats can be
fetched. */
- if (dict_index_is_spatial(index)) {
- return(false);
- }
+ ut_ad(!dict_index_is_spatial(index));
n_cols = dict_index_get_n_unique(index);
@@ -6994,7 +6981,7 @@ btr_estimate_number_of_different_key_vals(
mtr_commit(&mtr);
mem_heap_free(heap);
- return(false);
+ return result;
}
/* Count the number of different key values for each prefix of
@@ -7100,8 +7087,12 @@ exit_loop:
also the pages used for external storage of fields (those pages are
included in index->stat_n_leaf_pages) */
+ result.reserve(n_cols);
+
for (j = 0; j < n_cols; j++) {
- index->stat_n_diff_key_vals[j]
+ index_field_stats_t stat;
+
+ stat.n_diff_key_vals
= BTR_TABLE_STATS_FROM_SAMPLE(
n_diff[j], index, n_sample_pages,
total_external_size, not_empty_flag);
@@ -7122,25 +7113,23 @@ exit_loop:
add_on = n_sample_pages;
}
- index->stat_n_diff_key_vals[j] += add_on;
+ stat.n_diff_key_vals += add_on;
- index->stat_n_sample_sizes[j] = n_sample_pages;
+ stat.n_sample_sizes = n_sample_pages;
- /* Update the stat_n_non_null_key_vals[] with our
- sampled result. stat_n_non_null_key_vals[] is created
- and initialized to zero in dict_index_add_to_cache(),
- along with stat_n_diff_key_vals[] array */
if (n_not_null != NULL) {
- index->stat_n_non_null_key_vals[j] =
+ stat.n_non_null_key_vals =
BTR_TABLE_STATS_FROM_SAMPLE(
n_not_null[j], index, n_sample_pages,
total_external_size, not_empty_flag);
}
+
+ result.push_back(stat);
}
mem_heap_free(heap);
- return(true);
+ return result;
}
/*================== EXTERNAL STORAGE OF BIG FIELDS ===================*/
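A hedged sketch, not part of the patch, of how a caller might consume the new return value; the caller's context and latching are assumptions, and only the index_field_stats_t members used below appear in the hunk above.

	std::vector<index_field_stats_t> stats
		= btr_estimate_number_of_different_key_vals(index);

	if (!stats.empty()) {
		/* The index was available; publish the sampled figures. */
		ut_ad(stats.size() == dict_index_get_n_unique(index));

		for (ulint i = 0; i < stats.size(); i++) {
			index->stat_n_diff_key_vals[i] = stats[i].n_diff_key_vals;
			index->stat_n_sample_sizes[i] = stats[i].n_sample_sizes;
			index->stat_n_non_null_key_vals[i]
				= stats[i].n_non_null_key_vals;
		}
	}
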
diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc
index 37444ee974d..9c5216dc015 100644
--- a/storage/innobase/btr/btr0pcur.cc
+++ b/storage/innobase/btr/btr0pcur.cc
@@ -198,11 +198,10 @@ before_first:
cursor->old_n_fields,
&cursor->old_rec_buf,
&cursor->buf_size);
- cursor->block_when_stored = block;
+ cursor->block_when_stored.store(block);
/* Function try to check if block is S/X latch. */
cursor->modify_clock = buf_block_get_modify_clock(block);
- cursor->withdraw_clock = buf_withdraw_clock;
}
/**************************************************************//**
@@ -232,6 +231,26 @@ btr_pcur_copy_stored_position(
pcur_receive->old_n_fields = pcur_donate->old_n_fields;
}
+/** Structure that acts as a functor to do the latching of leaf pages.
+It returns true if latching of the leaf pages succeeded and false
+otherwise. */
+struct optimistic_latch_leaves
+{
+ btr_pcur_t *const cursor;
+ ulint *latch_mode;
+ mtr_t *const mtr;
+
+ optimistic_latch_leaves(btr_pcur_t *cursor, ulint *latch_mode, mtr_t *mtr)
+ :cursor(cursor), latch_mode(latch_mode), mtr(mtr) {}
+
+ bool operator() (buf_block_t *hint) const
+ {
+ return hint && btr_cur_optimistic_latch_leaves(
+ hint, cursor->modify_clock, latch_mode,
+ btr_pcur_get_btr_cur(cursor), __FILE__, __LINE__, mtr);
+ }
+};
+
/**************************************************************//**
Restores the stored position of a persistent cursor bufferfixing the page and
obtaining the specified latches. If the cursor position was saved when the
@@ -294,7 +313,7 @@ btr_pcur_restore_position_func(
cursor->latch_mode =
BTR_LATCH_MODE_WITHOUT_INTENTION(latch_mode);
cursor->pos_state = BTR_PCUR_IS_POSITIONED;
- cursor->block_when_stored = btr_pcur_get_block(cursor);
+ cursor->block_when_stored.clear();
return(FALSE);
}
@@ -309,12 +328,9 @@ btr_pcur_restore_position_func(
case BTR_MODIFY_PREV:
/* Try optimistic restoration. */
- if (!buf_pool_is_obsolete(cursor->withdraw_clock)
- && btr_cur_optimistic_latch_leaves(
- cursor->block_when_stored, cursor->modify_clock,
- &latch_mode, btr_pcur_get_btr_cur(cursor),
- file, line, mtr)) {
-
+ if (cursor->block_when_stored.run_with_hint(
+ optimistic_latch_leaves(cursor, &latch_mode,
+ mtr))) {
cursor->pos_state = BTR_PCUR_IS_POSITIONED;
cursor->latch_mode = latch_mode;
@@ -412,11 +428,10 @@ btr_pcur_restore_position_func(
since the cursor can now be on a different page!
But we can retain the value of old_rec */
- cursor->block_when_stored = btr_pcur_get_block(cursor);
+ cursor->block_when_stored.store(btr_pcur_get_block(cursor));
cursor->modify_clock = buf_block_get_modify_clock(
- cursor->block_when_stored);
+ cursor->block_when_stored.block());
cursor->old_stored = true;
- cursor->withdraw_clock = buf_withdraw_clock;
mem_heap_free(heap);
diff --git a/storage/innobase/buf/buf0block_hint.cc b/storage/innobase/buf/buf0block_hint.cc
new file mode 100644
index 00000000000..9f974e8304d
--- /dev/null
+++ b/storage/innobase/buf/buf0block_hint.cc
@@ -0,0 +1,78 @@
+/*****************************************************************************
+
+Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2020, MariaDB Corporation.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License, version 2.0, as published by the
+Free Software Foundation.
+
+This program is also distributed with certain software (including but not
+limited to OpenSSL) that is licensed under separate terms, as designated in a
+particular file or component or in included license documentation. The authors
+of MySQL hereby grant you an additional permission to link the program and
+your derivative works with the separately licensed software that they have
+included with MySQL.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
+for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+*****************************************************************************/
+
+#include "buf0block_hint.h"
+namespace buf {
+
+void Block_hint::buffer_fix_block_if_still_valid()
+{
+ /* We need to check if m_block points to one of chunks. For this to be
+ meaningful we need to prevent freeing memory while we check, and until we
+ buffer-fix the block. For this purpose it is enough to latch any of the many
+ latches taken by buf_resize().
+ However, for buffer-fixing to be meaningful, the block has to contain a page
+ (as opposed to being already empty, which might mean that buf_pool_resize()
+ can proceed and free it once we free the s-latch), so we confirm that the
+ block contains a page. However, it is not sufficient to check that this is
+ just any page, because just after we check it could get freed, unless we
+ have a latch which prevents this. This is tricky because page_hash latches
+ are sharded by page_id and we don't know the page_id until we look into the
+ block. To solve this chicken-and-egg problem somewhat, we latch the shard
+  for the m_page_id and compare block->page.id to it - so if it is equal then we
+ can be reasonably sure that we have the correct latch.
+ There is still a theoretical problem here, where other threads might try
+ to modify the m_block->page.id while we are comparing it, but the chance of
+ accidentally causing the old space_id == m_page_id.m_space and the new
+ page_no == m_page_id.m_page_no is minimal as compilers emit a single 8-byte
+ comparison instruction to compare both at the same time atomically, and f()
+ will probably double-check the block->page.id again, anyway.
+  Finally, assuming that we have the correct hash bucket latched, we should check if
+ the state of the block is BUF_BLOCK_FILE_PAGE before buffer-fixing the block,
+ as otherwise we risk buffer-fixing and operating on a block, which is already
+ meant to be freed. In particular, buf_LRU_free_page() first calls
+ buf_LRU_block_remove_hashed() under hash bucket latch protection to change the
+ state to BUF_BLOCK_REMOVE_HASH and then releases the latch. Later it calls
+ buf_LRU_block_free_hashed_page() without any latch to change the state to
+ BUF_BLOCK_MEMORY and reset the page's id, which means buf_resize() can free it
+ regardless of our buffer-fixing. */
+ if (m_block)
+ {
+ const buf_pool_t *const buf_pool= buf_pool_get(m_page_id);
+ rw_lock_t *latch= buf_page_hash_lock_get(buf_pool, m_page_id);
+ rw_lock_s_lock(latch);
+ /* If not own buf_pool_mutex, page_hash can be changed. */
+ latch= buf_page_hash_lock_s_confirm(latch, buf_pool, m_page_id);
+ if (buf_pool->is_block_field(m_block) &&
+ m_page_id == m_block->page.id &&
+ buf_block_get_state(m_block) == BUF_BLOCK_FILE_PAGE)
+ buf_block_buf_fix_inc(m_block, __FILE__, __LINE__);
+ else
+ clear();
+ rw_lock_s_unlock(latch);
+ }
+}
+} // namespace buf
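For context, a minimal sketch of the usage pattern this class supports, mirroring the btr0pcur.cc changes earlier in this patch. The Block_hint interface itself lives in buf0block_hint.h (not reproduced here), so store()/clear()/run_with_hint() and the functor protocol below are assumptions based on how btr_pcur_t uses them.

#include "buf0block_hint.h"

/* Functor invoked by run_with_hint(); it receives the remembered block,
   already buffer-fixed if still valid, or NULL otherwise. */
struct relatch_if_unchanged
{
  ib_uint64_t modify_clock;

  bool operator()(buf_block_t *hint) const
  {
    return hint && hint->modify_clock == modify_clock;
  }
};

static void example(buf::Block_hint &saved, buf_block_t *block)
{
  saved.store(block);                  // remember the block and its page id
  /* ... the buffer pool may be resized in between ... */
  relatch_if_unchanged f = { block->modify_clock };
  if (!saved.run_with_hint(f))         // assumed to return the functor's result
    saved.clear();                     // fall back to a pessimistic restore
}
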
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index b13ac2ef2ac..6095f71f839 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -342,14 +342,6 @@ buf_pool_t* buf_pool_ptr;
/** true when resizing buffer pool is in the critical path. */
volatile bool buf_pool_resizing;
-/** true when withdrawing buffer pool pages might cause page relocation */
-volatile bool buf_pool_withdrawing;
-
-/** the clock is incremented every time a pointer to a page may become obsolete;
-if the withdrwa clock has not changed, the pointer is still valid in buffer
-pool. if changed, the pointer might not be in buffer pool any more. */
-volatile ulint buf_withdraw_clock;
-
/** Map of buffer pool chunks by its first frame address
This is newly made by initialization of buffer pool and buf_resize_thread.
Currently, no need mutex protection for update. */
@@ -2090,8 +2082,6 @@ buf_pool_init(
NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;
buf_pool_resizing = false;
- buf_pool_withdrawing = false;
- buf_withdraw_clock = 0;
buf_pool_ptr = (buf_pool_t*) ut_zalloc_nokey(
n_instances * sizeof *buf_pool_ptr);
@@ -2151,7 +2141,6 @@ buf_page_realloc(
{
buf_block_t* new_block;
- ut_ad(buf_pool_withdrawing);
ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
@@ -2573,9 +2562,6 @@ buf_pool_withdraw_blocks(
ib::info() << "buffer pool " << i << " : withdrawn target "
<< UT_LIST_GET_LEN(buf_pool->withdraw) << " blocks.";
- /* retry is not needed */
- ++buf_withdraw_clock;
-
return(false);
}
@@ -2672,7 +2658,6 @@ buf_pool_resize()
NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;
ut_ad(!buf_pool_resizing);
- ut_ad(!buf_pool_withdrawing);
ut_ad(srv_buf_pool_chunk_unit > 0);
new_instance_size = srv_buf_pool_size / srv_buf_pool_instances;
@@ -2740,7 +2725,6 @@ buf_pool_resize()
ut_ad(buf_pool->withdraw_target == 0);
buf_pool->withdraw_target = withdraw_target;
- buf_pool_withdrawing = true;
}
}
@@ -2765,7 +2749,6 @@ withdraw_retry:
if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
/* abort to resize for shutdown. */
- buf_pool_withdrawing = false;
return;
}
@@ -2827,7 +2810,6 @@ withdraw_retry:
goto withdraw_retry;
}
- buf_pool_withdrawing = false;
buf_resize_status("Latching whole of buffer pool.");
@@ -4011,37 +3993,6 @@ buf_block_from_ahi(const byte* ptr)
/********************************************************************//**
Find out if a pointer belongs to a buf_block_t. It can be a pointer to
-the buf_block_t itself or a member of it. This functions checks one of
-the buffer pool instances.
-@return TRUE if ptr belongs to a buf_block_t struct */
-static
-ibool
-buf_pointer_is_block_field_instance(
-/*================================*/
- buf_pool_t* buf_pool, /*!< in: buffer pool instance */
- const void* ptr) /*!< in: pointer not dereferenced */
-{
- const buf_chunk_t* chunk = buf_pool->chunks;
- const buf_chunk_t* const echunk = chunk + ut_min(
- buf_pool->n_chunks, buf_pool->n_chunks_new);
-
- /* TODO: protect buf_pool->chunks with a mutex (the older pointer will
- currently remain while during buf_pool_resize()) */
- while (chunk < echunk) {
- if (ptr >= (void*) chunk->blocks
- && ptr < (void*) (chunk->blocks + chunk->size)) {
-
- return(TRUE);
- }
-
- chunk++;
- }
-
- return(FALSE);
-}
-
-/********************************************************************//**
-Find out if a pointer belongs to a buf_block_t. It can be a pointer to
the buf_block_t itself or a member of it
@return TRUE if ptr belongs to a buf_block_t struct */
ibool
@@ -4052,11 +4003,7 @@ buf_pointer_is_block_field(
ulint i;
for (i = 0; i < srv_buf_pool_instances; i++) {
- ibool found;
-
- found = buf_pointer_is_block_field_instance(
- buf_pool_from_array(i), ptr);
- if (found) {
+ if (buf_pool_from_array(i)->is_block_field(ptr)) {
return(TRUE);
}
}
@@ -4064,25 +4011,6 @@ buf_pointer_is_block_field(
return(FALSE);
}
-/********************************************************************//**
-Find out if a buffer block was created by buf_chunk_init().
-@return TRUE if "block" has been added to buf_pool->free by buf_chunk_init() */
-static
-ibool
-buf_block_is_uncompressed(
-/*======================*/
- buf_pool_t* buf_pool, /*!< in: buffer pool instance */
- const buf_block_t* block) /*!< in: pointer to block,
- not dereferenced */
-{
- if ((((ulint) block) % sizeof *block) != 0) {
- /* The pointer should be aligned. */
- return(FALSE);
- }
-
- return(buf_pointer_is_block_field_instance(buf_pool, (void*) block));
-}
-
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
Return true if probe is enabled.
@@ -4322,7 +4250,7 @@ loop:
has been allocated by buf_page_alloc_descriptor(),
it may have been freed by buf_relocate(). */
- if (!buf_block_is_uncompressed(buf_pool, block)
+ if (!buf_pool->is_block_field(block)
|| page_id != block->page.id
|| buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index d33e1dd3c3a..2697afc2802 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1785,6 +1785,12 @@ void dict_sys_t::remove(dict_table_t* table, bool lru, bool keep)
#ifdef BTR_CUR_HASH_ADAPT
if (UNIV_UNLIKELY(UT_LIST_GET_LEN(table->freed_indexes) != 0)) {
+ if (table->fts) {
+ fts_optimize_remove_table(table);
+ fts_free(table);
+ table->fts = NULL;
+ }
+
table->vc_templ = NULL;
table->id = 0;
return;
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index 70fa9c8245e..ddd2b99ef21 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -124,19 +124,22 @@ bool dict_col_t::same_encoding(uint16_t a, uint16_t b)
return false;
}
-/**********************************************************************//**
-Creates a table memory object.
+/** Create a table memory object.
+@param name table name
+@param space tablespace
+@param n_cols total number of columns (both virtual and non-virtual)
+@param n_v_cols number of virtual columns
+@param flags table flags
+@param flags2 table flags2
@return own: table object */
dict_table_t*
dict_mem_table_create(
-/*==================*/
- const char* name, /*!< in: table name */
- fil_space_t* space, /*!< in: tablespace */
- ulint n_cols, /*!< in: total number of columns including
- virtual and non-virtual columns */
- ulint n_v_cols,/*!< in: number of virtual columns */
- ulint flags, /*!< in: table flags */
- ulint flags2) /*!< in: table flags2 */
+ const char* name,
+ fil_space_t* space,
+ ulint n_cols,
+ ulint n_v_cols,
+ ulint flags,
+ ulint flags2)
{
dict_table_t* table;
mem_heap_t* heap;
@@ -198,9 +201,6 @@ dict_mem_table_create(
new(&table->foreign_set) dict_foreign_set();
new(&table->referenced_set) dict_foreign_set();
- rw_lock_create(dict_table_stats_key, &table->stats_latch,
- SYNC_INDEX_TREE);
-
return(table);
}
@@ -245,8 +245,6 @@ dict_mem_table_free(
UT_DELETE(table->s_cols);
- rw_lock_free(&table->stats_latch);
-
mem_heap_free(table->heap);
}
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index e593f1e8de2..950e33c090b 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -483,8 +483,6 @@ dict_stats_table_clone_create(
ut_d(t->magic_n = DICT_TABLE_MAGIC_N);
- rw_lock_create(dict_table_stats_key, &t->stats_latch, SYNC_INDEX_TREE);
-
return(t);
}
@@ -497,7 +495,6 @@ dict_stats_table_clone_free(
/*========================*/
dict_table_t* t) /*!< in: dummy table object to free */
{
- rw_lock_free(&t->stats_latch);
mem_heap_free(t->heap);
}
@@ -514,7 +511,7 @@ dict_stats_empty_index(
{
ut_ad(!(index->type & DICT_FTS));
ut_ad(!dict_index_is_ibuf(index));
- ut_ad(rw_lock_own(&index->table->stats_latch, RW_LOCK_X));
+ ut_ad(mutex_own(&dict_sys.mutex));
ulint n_uniq = index->n_uniq;
@@ -544,10 +541,9 @@ dict_stats_empty_table(
bool empty_defrag_stats)
/*!< in: whether to empty defrag stats */
{
- /* Zero the stats members */
-
- rw_lock_x_lock(&table->stats_latch);
+ mutex_enter(&dict_sys.mutex);
+ /* Zero the stats members */
table->stat_n_rows = 0;
table->stat_clustered_index_size = 1;
/* 1 page for each index, not counting the clustered */
@@ -571,8 +567,7 @@ dict_stats_empty_table(
}
table->stat_initialized = TRUE;
-
- rw_lock_x_unlock(&table->stats_latch);
+ mutex_exit(&dict_sys.mutex);
}
/*********************************************************************//**
@@ -671,6 +666,8 @@ dict_stats_copy(
to have the same statistics as if
the table was empty */
{
+ ut_ad(mutex_own(&dict_sys.mutex));
+
dst->stats_last_recalc = src->stats_last_recalc;
dst->stat_n_rows = src->stat_n_rows;
dst->stat_clustered_index_size = src->stat_clustered_index_size;
@@ -788,8 +785,6 @@ dict_stats_snapshot_create(
{
mutex_enter(&dict_sys.mutex);
- rw_lock_s_lock(&table->stats_latch);
-
dict_stats_assert_initialized(table);
dict_table_t* t;
@@ -803,8 +798,6 @@ dict_stats_snapshot_create(
t->stats_sample_pages = table->stats_sample_pages;
t->stats_bg_flag = table->stats_bg_flag;
- rw_lock_s_unlock(&table->stats_latch);
-
mutex_exit(&dict_sys.mutex);
return(t);
@@ -844,10 +837,14 @@ dict_stats_update_transient_for_index(
Initialize some bogus index cardinality
statistics, so that the data can be queried in
various means, also via secondary indexes. */
+ mutex_enter(&dict_sys.mutex);
dict_stats_empty_index(index, false);
+ mutex_exit(&dict_sys.mutex);
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
} else if (ibuf_debug && !dict_index_is_clust(index)) {
+ mutex_enter(&dict_sys.mutex);
dict_stats_empty_index(index, false);
+ mutex_exit(&dict_sys.mutex);
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
} else {
mtr_t mtr;
@@ -868,7 +865,9 @@ dict_stats_update_transient_for_index(
switch (size) {
case ULINT_UNDEFINED:
+ mutex_enter(&dict_sys.mutex);
dict_stats_empty_index(index, false);
+ mutex_exit(&dict_sys.mutex);
return;
case 0:
/* The root node of the tree is a leaf */
@@ -880,11 +879,23 @@ dict_stats_update_transient_for_index(
/* Do not continue if table decryption has failed or
table is already marked as corrupted. */
if (index->is_readable()) {
- /* We don't handle the return value since it
- will be false only when some thread is
- dropping the table and we don't have to empty
- the statistics of the to be dropped index */
- btr_estimate_number_of_different_key_vals(index);
+ std::vector<index_field_stats_t> stats
+ = btr_estimate_number_of_different_key_vals(
+ index);
+
+ if (!stats.empty()) {
+ ut_ad(!mutex_own(&dict_sys.mutex));
+ mutex_enter(&dict_sys.mutex);
+ for (size_t i = 0; i < stats.size(); ++i) {
+ index->stat_n_diff_key_vals[i]
+ = stats[i].n_diff_key_vals;
+ index->stat_n_sample_sizes[i]
+ = stats[i].n_sample_sizes;
+ index->stat_n_non_null_key_vals[i]
+ = stats[i].n_non_null_key_vals;
+ }
+ mutex_exit(&dict_sys.mutex);
+ }
}
}
}
@@ -901,6 +912,8 @@ dict_stats_update_transient(
/*========================*/
dict_table_t* table) /*!< in/out: table */
{
+ ut_ad(!mutex_own(&dict_sys.mutex));
+
dict_index_t* index;
ulint sum_of_index_sizes = 0;
@@ -926,27 +939,25 @@ dict_stats_update_transient(
ut_ad(!dict_index_is_ibuf(index));
- if (index->type & DICT_FTS || dict_index_is_spatial(index)) {
+ if (index->type & (DICT_FTS | DICT_SPATIAL)) {
continue;
}
- dict_stats_empty_index(index, false);
-
- if (dict_stats_should_ignore_index(index)) {
+ if (dict_stats_should_ignore_index(index)
+ || !index->is_readable()) {
+ mutex_enter(&dict_sys.mutex);
+ dict_stats_empty_index(index, false);
+ mutex_exit(&dict_sys.mutex);
continue;
}
- /* Do not continue if table decryption has failed or
- table is already marked as corrupted. */
- if (!index->is_readable()) {
- break;
- }
-
dict_stats_update_transient_for_index(index);
sum_of_index_sizes += index->stat_index_size;
}
+ mutex_enter(&dict_sys.mutex);
+
index = dict_table_get_first_index(table);
table->stat_n_rows = index->stat_n_diff_key_vals[
@@ -962,6 +973,8 @@ dict_stats_update_transient(
table->stat_modified_counter = 0;
table->stat_initialized = TRUE;
+
+ mutex_exit(&dict_sys.mutex);
}
/* @{ Pseudo code about the relation between the following functions
@@ -1806,16 +1819,31 @@ dict_stats_analyze_index_for_n_prefix(
btr_pcur_close(&pcur);
}
+/** statistics for an index */
+struct index_stats_t
+{
+ std::vector<index_field_stats_t> stats;
+ ulint index_size;
+ ulint n_leaf_pages;
+
+ index_stats_t(ulint n_uniq) : index_size(1), n_leaf_pages(1)
+ {
+ stats.reserve(n_uniq);
+ for (ulint i= 0; i < n_uniq; ++i)
+ stats.push_back(index_field_stats_t(0, 1, 0));
+ }
+};
+
/** Set dict_index_t::stat_n_diff_key_vals[] and stat_n_sample_sizes[].
@param[in] n_diff_data input data to use to derive the results
-@param[in,out] index index whose stat_n_diff_key_vals[] to set */
+@param[in,out] index_stats index stats to set */
UNIV_INLINE
void
dict_stats_index_set_n_diff(
const n_diff_data_t* n_diff_data,
- dict_index_t* index)
+ index_stats_t& index_stats)
{
- for (ulint n_prefix = dict_index_get_n_unique(index);
+ for (ulint n_prefix = index_stats.stats.size();
n_prefix >= 1;
n_prefix--) {
/* n_diff_all_analyzed_pages can be 0 here if
@@ -1846,14 +1874,14 @@ dict_stats_index_set_n_diff(
that the total number of ordinary leaf pages is
T * D / (D + E). */
n_ordinary_leaf_pages
- = index->stat_n_leaf_pages
+ = index_stats.n_leaf_pages
* data->n_leaf_pages_to_analyze
/ (data->n_leaf_pages_to_analyze
+ data->n_external_pages_sum);
}
/* See REF01 for an explanation of the algorithm */
- index->stat_n_diff_key_vals[n_prefix - 1]
+ index_stats.stats[n_prefix - 1].n_diff_key_vals
= n_ordinary_leaf_pages
* data->n_diff_on_level
@@ -1862,7 +1890,7 @@ dict_stats_index_set_n_diff(
* data->n_diff_all_analyzed_pages
/ data->n_leaf_pages_to_analyze;
- index->stat_n_sample_sizes[n_prefix - 1]
+ index_stats.stats[n_prefix - 1].n_sample_sizes
= data->n_leaf_pages_to_analyze;
DEBUG_PRINTF(" %s(): n_diff=" UINT64PF
@@ -1871,9 +1899,9 @@ dict_stats_index_set_n_diff(
" * " UINT64PF " / " UINT64PF
" * " UINT64PF " / " UINT64PF ")\n",
__func__,
- index->stat_n_diff_key_vals[n_prefix - 1],
+ index_stats.stats[n_prefix - 1].n_diff_key_vals,
n_prefix,
- index->stat_n_leaf_pages,
+ index_stats.n_leaf_pages,
data->n_diff_on_level,
data->n_recs_on_level,
data->n_diff_all_analyzed_pages,
@@ -1881,15 +1909,12 @@ dict_stats_index_set_n_diff(
}
}
-/*********************************************************************//**
-Calculates new statistics for a given index and saves them to the index
+/** Calculates new statistics for a given index and saves them to the index
members stat_n_diff_key_vals[], stat_n_sample_sizes[], stat_index_size and
-stat_n_leaf_pages. This function could be slow. */
-static
-void
-dict_stats_analyze_index(
-/*=====================*/
- dict_index_t* index) /*!< in/out: index to analyze */
+stat_n_leaf_pages. This function can be slow.
+@param[in] index index to analyze
+@return index stats */
+static index_stats_t dict_stats_analyze_index(dict_index_t* index)
{
ulint root_level;
ulint level;
@@ -1900,26 +1925,28 @@ dict_stats_analyze_index(
ib_uint64_t total_pages;
mtr_t mtr;
ulint size;
+ index_stats_t result(index->n_uniq);
DBUG_ENTER("dict_stats_analyze_index");
DBUG_PRINT("info", ("index: %s, online status: %d", index->name(),
dict_index_get_online_status(index)));
+ ut_ad(!mutex_own(&dict_sys.mutex)); // because this function is slow
+ ut_ad(index->table->get_ref_count());
+
/* Disable update statistic for Rtree */
if (dict_index_is_spatial(index)) {
- DBUG_VOID_RETURN;
+ DBUG_RETURN(result);
}
DEBUG_PRINTF(" %s(index=%s)\n", __func__, index->name());
- dict_stats_empty_index(index, false);
-
mtr.start();
mtr_s_lock_index(index, &mtr);
size = btr_get_size(index, BTR_TOTAL_SIZE, &mtr);
if (size != ULINT_UNDEFINED) {
- index->stat_index_size = size;
+ result.index_size = size;
size = btr_get_size(index, BTR_N_LEAF_PAGES, &mtr);
}
@@ -1929,13 +1956,13 @@ dict_stats_analyze_index(
switch (size) {
case ULINT_UNDEFINED:
dict_stats_assert_initialized_index(index);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(result);
case 0:
/* The root node of the tree is a leaf */
size = 1;
}
- index->stat_n_leaf_pages = size;
+ result.n_leaf_pages = size;
mtr.start();
mtr_sx_lock_index(index, &mtr);
@@ -1974,14 +2001,18 @@ dict_stats_analyze_index(
NULL /* boundaries not needed */,
&mtr);
+ mtr.commit();
+
+ mutex_enter(&dict_sys.mutex);
for (ulint i = 0; i < n_uniq; i++) {
- index->stat_n_sample_sizes[i] = total_pages;
+ result.stats[i].n_diff_key_vals = index->stat_n_diff_key_vals[i];
+ result.stats[i].n_sample_sizes = total_pages;
+ result.stats[i].n_non_null_key_vals = index->stat_n_non_null_key_vals[i];
}
+ result.n_leaf_pages = index->stat_n_leaf_pages;
+ mutex_exit(&dict_sys.mutex);
- mtr.commit();
-
- dict_stats_assert_initialized_index(index);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(result);
}
/* For each level that is being scanned in the btree, this contains the
@@ -2173,13 +2204,12 @@ found_level:
/* n_prefix == 0 means that the above loop did not end up prematurely
due to tree being changed and so n_diff_data[] is set up. */
if (n_prefix == 0) {
- dict_stats_index_set_n_diff(n_diff_data, index);
+ dict_stats_index_set_n_diff(n_diff_data, result);
}
UT_DELETE_ARRAY(n_diff_data);
- dict_stats_assert_initialized_index(index);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(result);
}
/*********************************************************************//**
@@ -2197,7 +2227,7 @@ dict_stats_update_persistent(
DEBUG_PRINTF("%s(table=%s)\n", __func__, table->name);
- rw_lock_x_lock(&table->stats_latch);
+ DEBUG_SYNC_C("dict_stats_update_persistent");
/* analyze the clustered index first */
@@ -2208,7 +2238,6 @@ dict_stats_update_persistent(
|| (index->type | DICT_UNIQUE) != (DICT_CLUSTERED | DICT_UNIQUE)) {
/* Table definition is corrupt */
- rw_lock_x_unlock(&table->stats_latch);
dict_stats_empty_table(table, true);
return(DB_CORRUPTION);
@@ -2216,7 +2245,16 @@ dict_stats_update_persistent(
ut_ad(!dict_index_is_ibuf(index));
- dict_stats_analyze_index(index);
+ index_stats_t stats = dict_stats_analyze_index(index);
+
+ mutex_enter(&dict_sys.mutex);
+ index->stat_index_size = stats.index_size;
+ index->stat_n_leaf_pages = stats.n_leaf_pages;
+ for (size_t i = 0; i < stats.stats.size(); ++i) {
+ index->stat_n_diff_key_vals[i] = stats.stats[i].n_diff_key_vals;
+ index->stat_n_sample_sizes[i] = stats.stats[i].n_sample_sizes;
+ index->stat_n_non_null_key_vals[i] = stats.stats[i].n_non_null_key_vals;
+ }
ulint n_unique = dict_index_get_n_unique(index);
@@ -2234,7 +2272,7 @@ dict_stats_update_persistent(
ut_ad(!dict_index_is_ibuf(index));
- if (index->type & DICT_FTS || dict_index_is_spatial(index)) {
+ if (index->type & (DICT_FTS | DICT_SPATIAL)) {
continue;
}
@@ -2245,7 +2283,20 @@ dict_stats_update_persistent(
}
if (!(table->stats_bg_flag & BG_STAT_SHOULD_QUIT)) {
- dict_stats_analyze_index(index);
+ mutex_exit(&dict_sys.mutex);
+ stats = dict_stats_analyze_index(index);
+ mutex_enter(&dict_sys.mutex);
+
+ index->stat_index_size = stats.index_size;
+ index->stat_n_leaf_pages = stats.n_leaf_pages;
+ for (size_t i = 0; i < stats.stats.size(); ++i) {
+ index->stat_n_diff_key_vals[i]
+ = stats.stats[i].n_diff_key_vals;
+ index->stat_n_sample_sizes[i]
+ = stats.stats[i].n_sample_sizes;
+ index->stat_n_non_null_key_vals[i]
+ = stats.stats[i].n_non_null_key_vals;
+ }
}
table->stat_sum_of_other_index_sizes
@@ -2260,7 +2311,7 @@ dict_stats_update_persistent(
dict_stats_assert_initialized(table);
- rw_lock_x_unlock(&table->stats_latch);
+ mutex_exit(&dict_sys.mutex);
return(DB_SUCCESS);
}
@@ -3077,11 +3128,22 @@ dict_stats_update_for_index(
if (dict_stats_is_persistent_enabled(index->table)) {
if (dict_stats_persistent_storage_check(false)) {
- rw_lock_x_lock(&index->table->stats_latch);
- dict_stats_analyze_index(index);
+ index_stats_t stats = dict_stats_analyze_index(index);
+ mutex_enter(&dict_sys.mutex);
+ index->stat_index_size = stats.index_size;
+ index->stat_n_leaf_pages = stats.n_leaf_pages;
+ for (size_t i = 0; i < stats.stats.size(); ++i) {
+ index->stat_n_diff_key_vals[i]
+ = stats.stats[i].n_diff_key_vals;
+ index->stat_n_sample_sizes[i]
+ = stats.stats[i].n_sample_sizes;
+ index->stat_n_non_null_key_vals[i]
+ = stats.stats[i].n_non_null_key_vals;
+ }
index->table->stat_sum_of_other_index_sizes
+= index->stat_index_size;
- rw_lock_x_unlock(&index->table->stats_latch);
+ mutex_exit(&dict_sys.mutex);
+
dict_stats_save(index->table, &index->id);
DBUG_VOID_RETURN;
}
@@ -3102,9 +3164,7 @@ dict_stats_update_for_index(
}
}
- rw_lock_x_lock(&index->table->stats_latch);
dict_stats_update_transient_for_index(index);
- rw_lock_x_unlock(&index->table->stats_latch);
DBUG_VOID_RETURN;
}
@@ -3258,7 +3318,7 @@ dict_stats_update(
switch (err) {
case DB_SUCCESS:
- rw_lock_x_lock(&table->stats_latch);
+ mutex_enter(&dict_sys.mutex);
/* Pass reset_ignored_indexes=true as parameter
to dict_stats_copy. This will cause statictics
@@ -3267,7 +3327,7 @@ dict_stats_update(
dict_stats_assert_initialized(table);
- rw_lock_x_unlock(&table->stats_latch);
+ mutex_exit(&dict_sys.mutex);
dict_stats_table_clone_free(t);
@@ -3321,13 +3381,8 @@ dict_stats_update(
}
transient:
-
- rw_lock_x_lock(&table->stats_latch);
-
dict_stats_update_transient(table);
- rw_lock_x_unlock(&table->stats_latch);
-
return(DB_SUCCESS);
}
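
The dict0stats.cc changes above drop the per-table stats_latch in favour of dict_sys.mutex, and dict_stats_analyze_index() now returns an index_stats_t instead of writing into the index directly: the slow B-tree scan runs without the dictionary mutex, and the results are copied into dict_index_t only while the mutex is held. A rough, self-contained sketch of that compute-then-publish pattern (standard C++ stand-ins, not the real dictionary structures):

    #include <cstdint>
    #include <mutex>
    #include <vector>

    struct field_stats { uint64_t n_diff, n_sample, n_non_null; }; // ~ index_field_stats_t

    static std::mutex dict_mutex;                  // stands in for dict_sys.mutex

    // Slow part: gather per-field estimates with no dictionary mutex held.
    static std::vector<field_stats> analyze_index(size_t n_uniq)
    {
      field_stats empty{0, 1, 0};
      std::vector<field_stats> stats(n_uniq, empty);
      // ... scan the index B-tree and fill in the estimates ...
      return stats;
    }

    // Fast part: publish the result into the cached index under the mutex.
    static void publish(std::vector<uint64_t>& stat_n_diff_key_vals,
                        const std::vector<field_stats>& stats)
    {
      std::lock_guard<std::mutex> guard(dict_mutex);
      for (size_t i = 0; i < stats.size(); i++)
        stat_n_diff_key_vals[i] = stats[i].n_diff;
    }
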
diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc
index 225561917a8..0d3bb1b0c0a 100644
--- a/storage/innobase/fil/fil0crypt.cc
+++ b/storage/innobase/fil/fil0crypt.cc
@@ -406,6 +406,8 @@ fil_space_crypt_t::write_page0(
mlog_write_ulint(page + offset + MAGIC_SZ + 2 + len + 8, encryption,
MLOG_1BYTE, mtr);
+ DBUG_EXECUTE_IF("ib_do_not_log_crypt_data", return;);
+
byte* log_ptr = mlog_open(mtr, 11 + 17 + len);
if (log_ptr != NULL) {
diff --git a/storage/innobase/fts/fts0ast.cc b/storage/innobase/fts/fts0ast.cc
index e22613a265b..6be4fb0d52b 100644
--- a/storage/innobase/fts/fts0ast.cc
+++ b/storage/innobase/fts/fts0ast.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2018, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2020, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
@@ -557,8 +557,7 @@ fts_ast_node_check_union(
fts_ast_node_t* node)
{
if (node->type == FTS_AST_LIST
- || node->type == FTS_AST_SUBEXP_LIST
- || node->type == FTS_AST_PARSER_PHRASE_LIST) {
+ || node->type == FTS_AST_SUBEXP_LIST) {
for (node = node->list.head; node; node = node->next) {
if (!fts_ast_node_check_union(node)) {
@@ -566,6 +565,9 @@ fts_ast_node_check_union(
}
}
+ } else if (node->type == FTS_AST_PARSER_PHRASE_LIST) {
+ /* Phrase search for plugin parser */
+ return(false);
} else if (node->type == FTS_AST_OPER
&& (node->oper == FTS_IGNORE
|| node->oper == FTS_EXIST)) {
diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc
index a3e36f7d0da..8f212ff6676 100644
--- a/storage/innobase/fts/fts0que.cc
+++ b/storage/innobase/fts/fts0que.cc
@@ -4421,24 +4421,27 @@ fts_phrase_or_proximity_search(
if (k == ib_vector_size(query->match_array[j])) {
end_list = TRUE;
- if (match[j]->doc_id != match[0]->doc_id) {
- /* no match */
- if (query->flags & FTS_PHRASE) {
- ulint s;
+ if (query->flags & FTS_PHRASE) {
+ ulint s;
+ /* Since i is the last doc id in the
+ match_array[j], remove all doc ids > i
+ from the match_array[0]. */
+ fts_match_t* match_temp;
+ for (s = i + 1; s < n_matched; s++) {
+ match_temp = static_cast<
+ fts_match_t*>(ib_vector_get(
+ query->match_array[0], s));
+ match_temp->doc_id = 0;
+ }
+ if (match[j]->doc_id !=
+ match[0]->doc_id) {
+ /* no match */
match[0]->doc_id = 0;
-
- for (s = i + 1; s < n_matched;
- s++) {
- match[0] = static_cast<
- fts_match_t*>(
- ib_vector_get(
- query->match_array[0],
- s));
- match[0]->doc_id = 0;
- }
}
+ }
+ if (match[j]->doc_id != match[0]->doc_id) {
goto func_exit;
}
}
diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc
index b90e0444f45..849e080728f 100644
--- a/storage/innobase/gis/gis0sea.cc
+++ b/storage/innobase/gis/gis0sea.cc
@@ -1242,6 +1242,24 @@ rtr_check_discard_page(
lock_mutex_exit();
}
+/** Functor that attempts an optimistic access of the page.
+It returns true if the page was successfully acquired. */
+struct optimistic_get
+{
+ btr_pcur_t *const r_cursor;
+ mtr_t *const mtr;
+
+ optimistic_get(btr_pcur_t *r_cursor,mtr_t *mtr)
+ :r_cursor(r_cursor), mtr(mtr) {}
+
+ bool operator()(buf_block_t *hint) const
+ {
+ return hint && buf_page_optimistic_get(
+ RW_X_LATCH, hint, r_cursor->modify_clock, __FILE__,
+ __LINE__, mtr);
+ }
+};
+
/** Restore the stored position of a persistent cursor bufferfixing the page */
static
bool
@@ -1275,11 +1293,8 @@ rtr_cur_restore_position(
ut_ad(latch_mode == BTR_CONT_MODIFY_TREE);
- if (!buf_pool_is_obsolete(r_cursor->withdraw_clock)
- && buf_page_optimistic_get(RW_X_LATCH,
- r_cursor->block_when_stored,
- r_cursor->modify_clock,
- __FILE__, __LINE__, mtr)) {
+ if (r_cursor->block_when_stored.run_with_hint(
+ optimistic_get(r_cursor, mtr))) {
ut_ad(r_cursor->pos_state == BTR_PCUR_IS_POSITIONED);
ut_ad(r_cursor->rel_pos == BTR_PCUR_ON);
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 519b86c8716..0585aaacb46 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -234,6 +234,27 @@ enum default_row_format_enum {
DEFAULT_ROW_FORMAT_DYNAMIC = 2,
};
+/** A dummy variable */
+static uint innodb_max_purge_lag_wait;
+
+/** Wait for trx_sys_t::rseg_history_len to be below a limit. */
+static void innodb_max_purge_lag_wait_update(THD *thd, st_mysql_sys_var *,
+ void *, const void *limit)
+{
+ const uint l= *static_cast<const uint*>(limit);
+ if (trx_sys.rseg_history_len <= l)
+ return;
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ while (trx_sys.rseg_history_len > l)
+ {
+ if (thd_kill_level(thd))
+ break;
+ srv_wake_purge_thread_if_not_active();
+ os_thread_sleep(100000);
+ }
+ mysql_mutex_lock(&LOCK_global_system_variables);
+}
+
static
void set_my_errno(int err)
{
@@ -656,7 +677,6 @@ static PSI_rwlock_info all_innodb_rwlocks[] = {
PSI_RWLOCK_KEY(trx_purge_latch),
PSI_RWLOCK_KEY(index_tree_rw_lock),
PSI_RWLOCK_KEY(index_online_log),
- PSI_RWLOCK_KEY(dict_table_stats),
PSI_RWLOCK_KEY(hash_table_locks)
};
# endif /* UNIV_PFS_RWLOCK */
@@ -14103,6 +14123,8 @@ ha_innobase::info_low(
DEBUG_SYNC_C("ha_innobase_info_low");
+ ut_ad(!mutex_own(&dict_sys.mutex));
+
/* If we are forcing recovery at a high level, we will suppress
statistics calculation on tables, because that may crash the
server if an index is badly corrupted. */
@@ -14139,7 +14161,6 @@ ha_innobase::info_low(
opt = DICT_STATS_RECALC_TRANSIENT;
}
- ut_ad(!mutex_own(&dict_sys.mutex));
ret = dict_stats_update(ib_table, opt);
if (ret != DB_SUCCESS) {
@@ -14155,14 +14176,14 @@ ha_innobase::info_low(
stats.update_time = (ulong) ib_table->update_time;
}
+ DBUG_EXECUTE_IF("dict_sys_mutex_avoid", goto func_exit;);
+
if (flag & HA_STATUS_VARIABLE) {
ulint stat_clustered_index_size;
ulint stat_sum_of_other_index_sizes;
- if (!(flag & HA_STATUS_NO_LOCK)) {
- rw_lock_s_lock(&ib_table->stats_latch);
- }
+ mutex_enter(&dict_sys.mutex);
ut_a(ib_table->stat_initialized);
@@ -14174,9 +14195,7 @@ ha_innobase::info_low(
stat_sum_of_other_index_sizes
= ib_table->stat_sum_of_other_index_sizes;
- if (!(flag & HA_STATUS_NO_LOCK)) {
- rw_lock_s_unlock(&ib_table->stats_latch);
- }
+ mutex_exit(&dict_sys.mutex);
/*
The MySQL optimizer seems to assume in a left join that n_rows
@@ -14278,10 +14297,26 @@ ha_innobase::info_low(
ib_push_frm_error(m_user_thd, ib_table, table, num_innodb_index, true);
}
- if (!(flag & HA_STATUS_NO_LOCK)) {
- rw_lock_s_lock(&ib_table->stats_latch);
+ snprintf(path, sizeof(path), "%s/%s%s",
+ mysql_data_home, table->s->normalized_path.str,
+ reg_ext);
+
+ unpack_filename(path,path);
+
+ /* Note that we do not know the access time of the table,
+ nor the CHECK TABLE time, nor the UPDATE or INSERT time. */
+
+ if (os_file_get_status(
+ path, &stat_info, false,
+ srv_read_only_mode) == DB_SUCCESS) {
+ stats.create_time = (ulong) stat_info.ctime;
}
+ struct Locking {
+ Locking() { mutex_enter(&dict_sys.mutex); }
+ ~Locking() { mutex_exit(&dict_sys.mutex); }
+ } locking;
+
ut_a(ib_table->stat_initialized);
for (i = 0; i < table->s->keys; i++) {
@@ -14359,25 +14394,6 @@ ha_innobase::info_low(
key->rec_per_key[j] = rec_per_key_int;
}
}
-
- if (!(flag & HA_STATUS_NO_LOCK)) {
- rw_lock_s_unlock(&ib_table->stats_latch);
- }
-
- snprintf(path, sizeof(path), "%s/%s%s",
- mysql_data_home, table->s->normalized_path.str,
- reg_ext);
-
- unpack_filename(path,path);
-
- /* Note that we do not know the access time of the table,
- nor the CHECK TABLE time, nor the UPDATE or INSERT time. */
-
- if (os_file_get_status(
- path, &stat_info, false,
- srv_read_only_mode) == DB_SUCCESS) {
- stats.create_time = (ulong) stat_info.ctime;
- }
}
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
@@ -19021,6 +19037,11 @@ static MYSQL_SYSVAR_ULONG(max_purge_lag_delay, srv_max_purge_lag_delay,
0L, /* Minimum value */
10000000UL, 0); /* Maximum value */
+static MYSQL_SYSVAR_UINT(max_purge_lag_wait, innodb_max_purge_lag_wait,
+ PLUGIN_VAR_RQCMDARG,
+ "Wait until History list length is below the specified limit",
+ NULL, innodb_max_purge_lag_wait_update, UINT_MAX, 0, UINT_MAX, 0);
+
static MYSQL_SYSVAR_BOOL(rollback_on_timeout, innobase_rollback_on_timeout,
PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
"Roll back the complete transaction on lock wait timeout, for 4.x compatibility (disabled by default)",
@@ -19119,10 +19140,10 @@ static MYSQL_SYSVAR_BOOL(log_compressed_pages, page_zip_log_pages,
static MYSQL_SYSVAR_BOOL(log_optimize_ddl, innodb_log_optimize_ddl,
PLUGIN_VAR_OPCMDARG,
- "Reduce redo logging when natively creating indexes or rebuilding tables."
- " Setting this OFF avoids delay due to page flushing and"
- " allows concurrent backup.",
- NULL, NULL, TRUE);
+ "DEPRECATED. Ignored in MariaDB 10.5."
+ " Reduce redo logging when natively creating indexes or rebuilding tables."
+ " Enabling this may slow down backup and cause delay due to page flushing.",
+ NULL, NULL, FALSE);
static MYSQL_SYSVAR_ULONG(autoextend_increment,
sys_tablespace_auto_extend_increment,
@@ -20041,6 +20062,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(flushing_avg_loops),
MYSQL_SYSVAR(max_purge_lag),
MYSQL_SYSVAR(max_purge_lag_delay),
+ MYSQL_SYSVAR(max_purge_lag_wait),
MYSQL_SYSVAR(old_blocks_pct),
MYSQL_SYSVAR(old_blocks_time),
MYSQL_SYSVAR(open_files),
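
The new innodb_max_purge_lag_wait variable added above stores no state of its own; its update callback merely blocks until trx_sys.rseg_history_len has dropped to the requested limit, releasing LOCK_global_system_variables while it polls so that other SET GLOBAL statements are not stalled. A self-contained sketch of that wait loop, using standard C++ stand-ins for the server primitives:

    #include <atomic>
    #include <chrono>
    #include <mutex>
    #include <thread>

    static std::atomic<unsigned> history_len{0};  // ~ trx_sys.rseg_history_len
    static std::mutex            sysvar_mutex;    // ~ LOCK_global_system_variables
    static std::atomic<bool>     killed{false};   // ~ thd_kill_level(thd)

    // Called with sysvar_mutex held; returns with it held again.
    static void max_purge_lag_wait_update(unsigned limit)
    {
      if (history_len.load() <= limit)
        return;
      sysvar_mutex.unlock();
      while (history_len.load() > limit && !killed.load())
      {
        // nudge the purge thread, then poll
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
      }
      sysvar_mutex.lock();
    }

In practice a statement such as SET GLOBAL innodb_max_purge_lag_wait=0 therefore only returns once the history list has been purged down to the limit, or the statement is killed.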
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index 8127effb304..fb3e79b4f89 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -6267,38 +6267,43 @@ i_s_dict_fill_sys_tablestats(
OK(field_store_string(fields[SYS_TABLESTATS_NAME],
table->name.m_name));
- rw_lock_s_lock(&table->stats_latch);
+ {
+ struct Locking
+ {
+ Locking() { mutex_enter(&dict_sys.mutex); }
+ ~Locking() { mutex_exit(&dict_sys.mutex); }
+ } locking;
- if (table->stat_initialized) {
- OK(field_store_string(fields[SYS_TABLESTATS_INIT],
- "Initialized"));
+ if (table->stat_initialized) {
+ OK(field_store_string(fields[SYS_TABLESTATS_INIT],
+ "Initialized"));
- OK(fields[SYS_TABLESTATS_NROW]->store(table->stat_n_rows,
- true));
+ OK(fields[SYS_TABLESTATS_NROW]->store(
+ table->stat_n_rows, true));
- OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(
- table->stat_clustered_index_size, true));
+ OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(
+ table->stat_clustered_index_size, true));
- OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(
- table->stat_sum_of_other_index_sizes, true));
+ OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(
+ table->stat_sum_of_other_index_sizes,
+ true));
- OK(fields[SYS_TABLESTATS_MODIFIED]->store(
- table->stat_modified_counter, true));
- } else {
- OK(field_store_string(fields[SYS_TABLESTATS_INIT],
- "Uninitialized"));
+ OK(fields[SYS_TABLESTATS_MODIFIED]->store(
+ table->stat_modified_counter, true));
+ } else {
+ OK(field_store_string(fields[SYS_TABLESTATS_INIT],
+ "Uninitialized"));
- OK(fields[SYS_TABLESTATS_NROW]->store(0, true));
+ OK(fields[SYS_TABLESTATS_NROW]->store(0, true));
- OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(0, true));
+ OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(0, true));
- OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(0, true));
+ OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(0, true));
- OK(fields[SYS_TABLESTATS_MODIFIED]->store(0, true));
+ OK(fields[SYS_TABLESTATS_MODIFIED]->store(0, true));
+ }
}
- rw_lock_s_unlock(&table->stats_latch);
-
OK(fields[SYS_TABLESTATS_AUTONINC]->store(table->autoinc, true));
OK(fields[SYS_TABLESTATS_TABLE_REF_COUNT]->store(ref_count, true));
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index 29382bb033f..fe845005536 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -214,7 +214,7 @@ the index.
ulint
btr_height_get(
/*===========*/
- dict_index_t* index, /*!< in: index tree */
+ const dict_index_t* index, /*!< in: index tree */
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((warn_unused_result));
@@ -592,7 +592,7 @@ Gets the number of pages in a B-tree.
ulint
btr_get_size(
/*=========*/
- dict_index_t* index, /*!< in: index */
+ const dict_index_t* index, /*!< in: index */
ulint flag, /*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
mtr_t* mtr) /*!< in/out: mini-transaction where index
is s-latched */
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index 12aaa73ae30..1f1ac2a09e7 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -614,8 +614,24 @@ btr_estimate_n_rows_in_range(
const dtuple_t* tuple2,
page_cur_mode_t mode2);
-/*******************************************************************//**
-Estimates the number of different key values in a given index, for
+
+/** Statistics for one field of an index. */
+struct index_field_stats_t
+{
+ ib_uint64_t n_diff_key_vals;
+ ib_uint64_t n_sample_sizes;
+ ib_uint64_t n_non_null_key_vals;
+
+ index_field_stats_t(ib_uint64_t n_diff_key_vals= 0,
+ ib_uint64_t n_sample_sizes= 0,
+ ib_uint64_t n_non_null_key_vals= 0)
+ : n_diff_key_vals(n_diff_key_vals), n_sample_sizes(n_sample_sizes),
+ n_non_null_key_vals(n_non_null_key_vals)
+ {
+ }
+};
+
+/** Estimates the number of different key values in a given index, for
each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index).
The estimates are stored in the array index->stat_n_diff_key_vals[] (indexed
0..n_uniq-1) and the number of pages that were sampled is saved in
@@ -623,12 +639,11 @@ index->stat_n_sample_sizes[].
If innodb_stats_method is nulls_ignored, we also record the number of
non-null values for each prefix and stored the estimates in
array index->stat_n_non_null_key_vals.
-@return true if the index is available and we get the estimated numbers,
-false if the index is unavailable. */
-bool
-btr_estimate_number_of_different_key_vals(
-/*======================================*/
- dict_index_t* index); /*!< in: index */
+@param[in] index index
+@return stat vector if the index is available and we get the estimated numbers,
+empty vector if the index is unavailable. */
+std::vector<index_field_stats_t>
+btr_estimate_number_of_different_key_vals(dict_index_t* index);
/** Gets the externally stored size of a record, in units of a database page.
@param[in] rec record
diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h
index c20b971de98..38960b1d15c 100644
--- a/storage/innobase/include/btr0pcur.h
+++ b/storage/innobase/include/btr0pcur.h
@@ -29,6 +29,7 @@ Created 2/23/1996 Heikki Tuuri
#include "dict0dict.h"
#include "btr0cur.h"
+#include "buf0block_hint.h"
#include "btr0btr.h"
#include "gis0rtree.h"
@@ -502,13 +503,10 @@ struct btr_pcur_t{
whether cursor was on, before, or after the old_rec record */
enum btr_pcur_pos_t rel_pos;
/** buffer block when the position was stored */
- buf_block_t* block_when_stored;
+ buf::Block_hint block_when_stored;
/** the modify clock value of the buffer block when the cursor position
was stored */
ib_uint64_t modify_clock;
- /** the withdraw clock value of the buffer pool when the cursor
- position was stored */
- ulint withdraw_clock;
/** btr_pcur_store_position() and btr_pcur_restore_position() state. */
enum pcur_pos_t pos_state;
/** PAGE_CUR_G, ... */
@@ -528,9 +526,8 @@ struct btr_pcur_t{
btr_pcur_t() :
btr_cur(), latch_mode(0), old_stored(false), old_rec(NULL),
old_n_fields(0), rel_pos(btr_pcur_pos_t(0)),
- block_when_stored(NULL),
- modify_clock(0), withdraw_clock(0),
- pos_state(BTR_PCUR_NOT_POSITIONED),
+ block_when_stored(),
+ modify_clock(0), pos_state(BTR_PCUR_NOT_POSITIONED),
search_mode(PAGE_CUR_UNSUPP), trx_if_known(NULL),
old_rec_buf(NULL), buf_size(0)
{
diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index 2fa9aaa38fe..adb14a7c16f 100644
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -206,8 +206,6 @@ struct btr_search_t{
the machine word, i.e., they cannot be turned into bit-fields. */
buf_block_t* root_guess;/*!< the root page frame when it was last time
fetched, or NULL */
- ulint withdraw_clock; /*!< the withdraw clock value of the buffer
- pool when root_guess was stored */
#ifdef BTR_CUR_HASH_ADAPT
ulint hash_analysis; /*!< when this exceeds
BTR_SEARCH_HASH_ANALYSIS, the hash
diff --git a/storage/innobase/include/buf0block_hint.h b/storage/innobase/include/buf0block_hint.h
new file mode 100644
index 00000000000..2d681175b25
--- /dev/null
+++ b/storage/innobase/include/buf0block_hint.h
@@ -0,0 +1,77 @@
+/*****************************************************************************
+
+Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2020, MariaDB Corporation.
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License, version 2.0, as published by the
+Free Software Foundation.
+
+This program is also distributed with certain software (including but not
+limited to OpenSSL) that is licensed under separate terms, as designated in a
+particular file or component or in included license documentation. The authors
+of MySQL hereby grant you an additional permission to link the program and
+your derivative works with the separately licensed software that they have
+included with MySQL.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
+for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+*****************************************************************************/
+#pragma once
+#include "buf0buf.h"
+
+namespace buf {
+class Block_hint {
+ public:
+ Block_hint():m_block(NULL),m_page_id(0,0) {}
+ /** Stores the pointer to the block, which is currently buffer-fixed.
+ @param block a pointer to a buffer-fixed block to be stored */
+ inline void store(buf_block_t *block)
+ {
+ ut_ad(block->page.buf_fix_count);
+ m_block= block;
+ m_page_id= block->page.id;
+ }
+
+ /** Clears currently stored pointer. */
+ inline void clear() { m_block= NULL; }
+
+  /** Invoke f on m_block (which may be null).
+ @param f The function to be executed. It will be passed the pointer.
+ If you wish to use the block pointer subsequently,
+ you need to ensure you buffer-fix it before returning from f.
+ @return the return value of f
+ */
+ template <typename F>
+ bool run_with_hint(const F &f)
+ {
+ buffer_fix_block_if_still_valid();
+    /* m_block could be changed during the f() call, so we use a local
+    variable to remember which block we need to unfix */
+ buf_block_t *block= m_block;
+ bool res= f(block);
+ if (block)
+ buf_block_buf_fix_dec(block);
+ return res;
+ }
+
+ buf_block_t *block() const { return m_block; }
+
+ private:
+ /** The block pointer stored by store(). */
+ buf_block_t *m_block;
+ /** If m_block is non-null, the m_block->page.id at time it was stored. */
+ page_id_t m_page_id;
+
+  /** A helper function which checks that m_block is not a dangling pointer,
+  i.e. that it still points to a block holding the page m_page_id, and if so
+  buffer-fixes it; otherwise it clear()s the hint. */
+ void buffer_fix_block_if_still_valid();
+};
+} // namespace buf
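
The new buf0block_hint.h above introduces buf::Block_hint, which remembers both the block pointer and the page id it had when stored, and only hands the pointer to a callback after re-validating (and, in the real code, buffer-fixing) it. A stripped-down model of that store/revalidate/run pattern, in plain C++ rather than the real buffer-pool types:

    #include <cstdint>

    struct page_block { uint64_t page_id; int fix_count; };

    class block_hint
    {
    public:
      void store(page_block* b) { m_block= b; m_page_id= b->page_id; }
      void clear()              { m_block= nullptr; }

      template <typename F> bool run_with_hint(const F& f)
      {
        // Keep the hint only if it still names the same page.  (The real
        // buffer_fix_block_if_still_valid() also checks that the pointer is
        // still inside the buffer pool before touching it.)
        page_block* b= (m_block && m_block->page_id == m_page_id)
                       ? m_block : nullptr;
        if (b) ++b->fix_count;
        const bool ok= f(b);      // f must tolerate a null hint
        if (b) --b->fix_count;
        return ok;
      }

    private:
      page_block* m_block= nullptr;
      uint64_t    m_page_id= 0;
    };

The rtr_cur_restore_position() hunk in gis0sea.cc shows the intended call pattern: an optimistic_get functor is passed to run_with_hint(), which attempts buf_page_optimistic_get() on the hinted block (possibly NULL).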
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 2c8421c8bf4..28060ec601e 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -100,10 +100,6 @@ extern buf_pool_t* buf_pool_ptr; /*!< The buffer pools
extern volatile bool buf_pool_withdrawing; /*!< true when withdrawing buffer
pool pages might cause page relocation */
-extern volatile ulint buf_withdraw_clock; /*!< the clock is incremented
- every time a pointer to a page may
- become obsolete */
-
# ifdef UNIV_DEBUG
extern my_bool buf_disable_resize_buffer_pool_debug; /*!< if TRUE, resizing
buffer pool is not allowed. */
@@ -1394,14 +1390,6 @@ buf_get_nth_chunk_block(
ulint n, /*!< in: nth chunk in the buffer pool */
ulint* chunk_size); /*!< in: chunk size */
-/** Verify the possibility that a stored page is not in buffer pool.
-@param[in] withdraw_clock withdraw clock when stored the page
-@retval true if the page might be relocated */
-UNIV_INLINE
-bool
-buf_pool_is_obsolete(
- ulint withdraw_clock);
-
/** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit,
if needed.
@param[in] size size in bytes
@@ -2284,6 +2272,12 @@ struct buf_pool_t{
return NULL;
}
} io_buf;
+
+ /** Determine if a pointer belongs to a buf_block_t.
+ It can be a pointer to the buf_block_t itself or a member of it.
+ @param ptr a pointer that will not be dereferenced
+ @return whether the ptr belongs to a buf_block_t struct */
+ inline bool is_block_field(const void *ptr) const;
};
/** Print the given buf_pool_t object.
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 7d11e2b4cc0..9fcac6e2695 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2014, 2019, MariaDB Corporation.
+Copyright (c) 2014, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -54,6 +54,25 @@ struct buf_chunk_t{
}
};
+bool buf_pool_t::is_block_field(const void *ptr) const
+{
+ const buf_chunk_t* chunk= chunks;
+ const buf_chunk_t *const echunk= chunk + ut_min(n_chunks,
+ n_chunks_new);
+ /* TODO: protect chunks with a mutex (the older pointer will
+ currently remain during resize()) */
+ while (chunk < echunk)
+ {
+ if (ptr >= reinterpret_cast<const void*>(chunk->blocks) &&
+ ptr < reinterpret_cast<const void*>(
+ chunk->blocks + chunk->size))
+ return true;
+ chunk++;
+ }
+
+ return false;
+}
+
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
@@ -989,8 +1008,6 @@ buf_block_buf_fix_dec(
/*==================*/
buf_block_t* block) /*!< in/out: block to bufferunfix */
{
- block->unfix();
-
#ifdef UNIV_DEBUG
/* No debug latch is acquired if block belongs to system temporary.
Debug latch is not of much help if access to block is single
@@ -999,6 +1016,8 @@ buf_block_buf_fix_dec(
rw_lock_s_unlock(block->debug_latch);
}
#endif /* UNIV_DEBUG */
+
+ block->unfix();
}
/** Returns the buffer pool instance given a page id.
@@ -1372,18 +1391,6 @@ buf_page_get_frame(
}
}
-/** Verify the possibility that a stored page is not in buffer pool.
-@param[in] withdraw_clock withdraw clock when stored the page
-@retval true if the page might be relocated */
-UNIV_INLINE
-bool
-buf_pool_is_obsolete(
- ulint withdraw_clock)
-{
- return(UNIV_UNLIKELY(buf_pool_withdrawing
- || buf_withdraw_clock != withdraw_clock));
-}
-
/** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit,
if needed.
@param[in] size size in bytes
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index c5f1ef96ea6..6bb86269856 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2019, MariaDB Corporation.
+Copyright (c) 2013, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1265,7 +1265,7 @@ UNIV_INLINE
rw_lock_t*
dict_index_get_lock(
/*================*/
- dict_index_t* index) /*!< in: index */
+ const dict_index_t* index) /*!< in: index */
MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Returns free space reserved for future updates of records. This is
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index b6d15f28a69..d00f4f8f6a3 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -938,7 +938,7 @@ UNIV_INLINE
rw_lock_t*
dict_index_get_lock(
/*================*/
- dict_index_t* index) /*!< in: index */
+ const dict_index_t* index) /*!< in: index */
{
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 929311bbc90..c6a506472df 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -297,20 +297,23 @@ parent table will fail, and user has to drop excessive foreign constraint
before proceeds. */
#define FK_MAX_CASCADE_DEL 15
-/**********************************************************************//**
-Creates a table memory object.
+/** Create a table memory object.
+@param name table name
+@param space tablespace
+@param n_cols total number of columns (both virtual and non-virtual)
+@param n_v_cols number of virtual columns
+@param flags table flags
+@param flags2 table flags2
@return own: table object */
dict_table_t*
dict_mem_table_create(
-/*==================*/
- const char* name, /*!< in: table name */
- fil_space_t* space, /*!< in: tablespace */
- ulint n_cols, /*!< in: total number of columns
- including virtual and non-virtual
- columns */
- ulint n_v_cols, /*!< in: number of virtual columns */
- ulint flags, /*!< in: table flags */
- ulint flags2); /*!< in: table flags2 */
+ const char* name,
+ fil_space_t* space,
+ ulint n_cols,
+ ulint n_v_cols,
+ ulint flags,
+ ulint flags2);
+
/****************************************************************//**
Free a table memory object. */
void
@@ -1118,7 +1121,7 @@ public:
when InnoDB was started up */
zip_pad_info_t zip_pad;/*!< Information about state of
compression failures and successes */
- rw_lock_t lock; /*!< read-write lock protecting the
+ mutable rw_lock_t lock; /*!< read-write lock protecting the
upper levels of the index tree */
/** Determine if the index has been committed to the
@@ -2102,20 +2105,8 @@ public:
/*!< set of foreign key constraints which refer to this table */
dict_foreign_set referenced_set;
- /** Statistics for query optimization. @{ */
-
- /** This latch protects:
- dict_table_t::stat_initialized,
- dict_table_t::stat_n_rows (*),
- dict_table_t::stat_clustered_index_size,
- dict_table_t::stat_sum_of_other_index_sizes,
- dict_table_t::stat_modified_counter (*),
- dict_table_t::indexes*::stat_n_diff_key_vals[],
- dict_table_t::indexes*::stat_index_size,
- dict_table_t::indexes*::stat_n_leaf_pages.
- (*) Those are not always protected for
- performance reasons. */
- rw_lock_t stats_latch;
+ /** Statistics for query optimization. Mostly protected by
+ dict_sys.mutex. @{ */
/** TRUE if statistics have been calculated the first time after
database startup or table creation. */
diff --git a/storage/innobase/include/dict0stats.ic b/storage/innobase/include/dict0stats.ic
index 34e5aedb127..d4e23ecb0a4 100644
--- a/storage/innobase/include/dict0stats.ic
+++ b/storage/innobase/include/dict0stats.ic
@@ -75,7 +75,7 @@ dict_stats_is_persistent_enabled(const dict_table_t* table)
+ dict_stats_update(DICT_STATS_RECALC_TRANSIENT) on a table that has
just been PS-enabled.
This is acceptable. Avoiding this would mean that we would have to
- protect the ::stat_persistent with dict_table_t::stats_latch like the
+ protect the stat_persistent with dict_sys.mutex like the
other ::stat_ members which would be too big performance penalty,
especially when this function is called from
dict_stats_update_if_needed(). */
@@ -178,10 +178,7 @@ dict_stats_deinit(
ut_a(table->get_ref_count() == 0);
- rw_lock_x_lock(&table->stats_latch);
-
if (!table->stat_initialized) {
- rw_lock_x_unlock(&table->stats_latch);
return;
}
@@ -221,6 +218,4 @@ dict_stats_deinit(
sizeof(index->stat_n_leaf_pages));
}
#endif /* HAVE_valgrind */
-
- rw_lock_x_unlock(&table->stats_latch);
}
diff --git a/storage/innobase/include/mtr0mtr.ic b/storage/innobase/include/mtr0mtr.ic
index fe76d4cc271..deb1e977404 100644
--- a/storage/innobase/include/mtr0mtr.ic
+++ b/storage/innobase/include/mtr0mtr.ic
@@ -170,10 +170,10 @@ mtr_t::release_block_at_savepoint(
ut_a(slot->object == block);
- reinterpret_cast<buf_block_t*>(block)->unfix();
-
buf_page_release_latch(block, slot->type);
+ reinterpret_cast<buf_block_t*>(block)->unfix();
+
slot->object = NULL;
}
diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h
index f930320b30c..c9cf963b840 100644
--- a/storage/innobase/include/sync0sync.h
+++ b/storage/innobase/include/sync0sync.h
@@ -122,7 +122,6 @@ extern mysql_pfs_key_t trx_i_s_cache_lock_key;
extern mysql_pfs_key_t trx_purge_latch_key;
extern mysql_pfs_key_t index_tree_rw_lock_key;
extern mysql_pfs_key_t index_online_log_key;
-extern mysql_pfs_key_t dict_table_stats_key;
extern mysql_pfs_key_t trx_sys_rw_lock_key;
extern mysql_pfs_key_t hash_table_locks_key;
#endif /* UNIV_PFS_RWLOCK */
diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h
index 2da9256c77d..6ba457cdc40 100644
--- a/storage/innobase/include/trx0sys.h
+++ b/storage/innobase/include/trx0sys.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h
index ce92e5de5e1..604560af3e9 100644
--- a/storage/innobase/include/trx0undo.h
+++ b/storage/innobase/include/trx0undo.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -360,8 +360,6 @@ struct trx_undo_t {
(IB_ID_MAX if the undo log is empty) */
buf_block_t* guess_block; /*!< guess for the buffer block where
the top page might reside */
- ulint withdraw_clock; /*!< the withdraw clock value of the
- buffer pool when guess_block was stored */
/** @return whether the undo log is empty */
bool empty() const { return top_undo_no == IB_ID_MAX; }
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index 15d7cc4b5a4..5ca74659813 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -233,8 +233,8 @@ static void memo_slot_release(mtr_memo_slot_t *slot)
case MTR_MEMO_PAGE_SX_FIX:
case MTR_MEMO_PAGE_X_FIX:
buf_block_t *block= reinterpret_cast<buf_block_t*>(slot->object);
- block->unfix();
buf_page_release_latch(block, slot->type);
+ block->unfix();
break;
}
slot->object= NULL;
@@ -276,8 +276,8 @@ struct ReleaseLatches {
case MTR_MEMO_PAGE_SX_FIX:
case MTR_MEMO_PAGE_X_FIX:
buf_block_t *block= reinterpret_cast<buf_block_t*>(slot->object);
- block->unfix();
buf_page_release_latch(block, slot->type);
+ block->unfix();
break;
}
slot->object= NULL;
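
Both mtr0mtr.cc hunks above (like the matching reordering in buf_block_buf_fix_dec() in buf0buf.ic) release the page latch first and decrement the buffer-fix count only afterwards, so the block remains buffer-fixed for as long as its latch is still held. In outline, with self-contained stand-ins and assuming the caller holds both:

    #include <atomic>
    #include <shared_mutex>

    struct block
    {
      std::shared_mutex latch;           // ~ the block's rw-latch
      std::atomic<int>  fix_count{0};    // ~ buf_fix_count
    };

    static void memo_slot_release(block& b)
    {
      b.latch.unlock();    // buf_page_release_latch(block, slot->type)
      --b.fix_count;       // block->unfix()
    }
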
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index a4359b9f7ad..ea0719a3fc8 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -1523,7 +1523,7 @@ error_exit:
srv_stats.n_rows_inserted.inc(size_t(trx->id));
}
- /* Not protected by dict_table_stats_lock() for performance
+ /* Not protected by dict_sys.mutex for performance
reasons, we would rather get garbage in stat_n_rows (which is
just an estimate anyway) than protecting the following code
with a latch. */
@@ -1893,7 +1893,7 @@ row_update_for_mysql(row_prebuilt_t* prebuilt)
ut_ad(is_delete == (node->is_delete == PLAIN_DELETE));
if (is_delete) {
- /* Not protected by dict_table_stats_lock() for performance
+ /* Not protected by dict_sys.mutex for performance
reasons, we would rather get garbage in stat_n_rows (which is
just an estimate anyway) than protecting the following code
with a latch. */
@@ -2244,8 +2244,7 @@ row_update_cascade_for_mysql(
bool stats;
if (node->is_delete == PLAIN_DELETE) {
- /* Not protected by
- dict_table_stats_lock() for
+ /* Not protected by dict_sys.mutex for
performance reasons, we would rather
get garbage in stat_n_rows (which is
just an estimate anyway) than
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index ed5f2c069da..b199469e5d5 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -589,7 +589,7 @@ row_undo_ins(
}
if (err == DB_SUCCESS && node->table->stat_initialized) {
- /* Not protected by dict_table_stats_lock() for
+ /* Not protected by dict_sys.mutex for
performance reasons, we would rather get garbage
in stat_n_rows (which is just an estimate anyway)
than protecting the following code with a latch. */
diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc
index f2c7e3abd13..a00c9cef5af 100644
--- a/storage/innobase/sync/sync0debug.cc
+++ b/storage/innobase/sync/sync0debug.cc
@@ -1446,9 +1446,6 @@ sync_latch_meta_init()
LATCH_ADD_RWLOCK(INDEX_TREE, SYNC_INDEX_TREE, index_tree_rw_lock_key);
- LATCH_ADD_RWLOCK(DICT_TABLE_STATS, SYNC_INDEX_TREE,
- dict_table_stats_key);
-
LATCH_ADD_RWLOCK(HASH_TABLE_RW_LOCK, SYNC_BUF_PAGE_HASH,
hash_table_locks_key);
diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index 99688964387..e7be502632d 100644
--- a/storage/innobase/sync/sync0sync.cc
+++ b/storage/innobase/sync/sync0sync.cc
@@ -97,7 +97,6 @@ mysql_pfs_key_t buf_block_debug_latch_key;
# endif /* UNIV_DEBUG */
mysql_pfs_key_t checkpoint_lock_key;
mysql_pfs_key_t dict_operation_lock_key;
-mysql_pfs_key_t dict_table_stats_key;
mysql_pfs_key_t hash_table_locks_key;
mysql_pfs_key_t index_tree_rw_lock_key;
mysql_pfs_key_t index_online_log_key;
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index 310d071d368..eb7d0967901 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -2030,7 +2030,6 @@ dberr_t trx_undo_report_rename(trx_t* trx, const dict_table_t* table)
if (ulint offset = trx_undo_page_report_rename(
trx, table, block, &mtr)) {
- undo->withdraw_clock = buf_withdraw_clock;
undo->top_page_no = undo->last_page_no;
undo->top_offset = offset;
undo->top_undo_no = trx->undo_no++;
@@ -2170,7 +2169,6 @@ trx_undo_report_row_operation(
mtr_commit(&mtr);
} else {
/* Success */
- undo->withdraw_clock = buf_withdraw_clock;
mtr_commit(&mtr);
undo->top_page_no = undo_block->page.id.page_no();
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index da219cf6139..6128044bbc9 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -1223,7 +1223,6 @@ trx_undo_mem_create(
undo->top_undo_no = IB_ID_MAX;
undo->top_page_no = page_no;
undo->guess_block = NULL;
- undo->withdraw_clock = 0;
ut_ad(undo->empty());
return(undo);
@@ -1412,9 +1411,7 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
if (undo) {
return buf_page_get_gen(
page_id_t(undo->rseg->space->id, undo->last_page_no),
- 0, RW_X_LATCH,
- buf_pool_is_obsolete(undo->withdraw_clock)
- ? NULL : undo->guess_block,
+ 0, RW_X_LATCH, undo->guess_block,
BUF_GET, __FILE__, __LINE__, mtr, err);
}
@@ -1468,9 +1465,7 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
if (*undo) {
return buf_page_get_gen(
page_id_t(rseg->space->id, (*undo)->last_page_no),
- 0, RW_X_LATCH,
- buf_pool_is_obsolete((*undo)->withdraw_clock)
- ? NULL : (*undo)->guess_block,
+ 0, RW_X_LATCH, (*undo)->guess_block,
BUF_GET, __FILE__, __LINE__, mtr, err);
}
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index aa99bc157c9..1f734439ffe 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -2523,6 +2523,9 @@ int ha_maria::info(uint flag)
MARIA_INFO maria_info;
char name_buff[FN_REFLEN];
+ if (!table)
+ return 0;
+
(void) maria_status(file, &maria_info, flag);
if (flag & HA_STATUS_VARIABLE)
{
diff --git a/storage/mroonga/vendor/groonga/CMakeLists.txt b/storage/mroonga/vendor/groonga/CMakeLists.txt
index 564e859d146..d271d4c4eb9 100644
--- a/storage/mroonga/vendor/groonga/CMakeLists.txt
+++ b/storage/mroonga/vendor/groonga/CMakeLists.txt
@@ -240,7 +240,6 @@ include(build/ac_macros/check_functions.m4)
ac_check_symbols(fpclassify math.h)
ac_check_lib(m fpclassify)
-ac_check_lib(dl dlopen)
ac_check_lib(execinfo backtrace)
if(HAVE_LIBEXECINFO)
set(HAVE_BACKTRACE TRUE)
diff --git a/storage/mroonga/vendor/groonga/lib/CMakeLists.txt b/storage/mroonga/vendor/groonga/lib/CMakeLists.txt
index 21628b26ce2..8c71563f722 100644
--- a/storage/mroonga/vendor/groonga/lib/CMakeLists.txt
+++ b/storage/mroonga/vendor/groonga/lib/CMakeLists.txt
@@ -97,7 +97,7 @@ set(GRN_ALL_LIBRARIES
${LZ4_LIBS}
${LIBZSTD_LIBS}
${MESSAGE_PACK_LIBS}
- ${DL_LIBS}
+ ${CMAKE_DL_LIBS}
${M_LIBS}
${WS2_32_LIBS}
${MRUBY_LIBS}
diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake
index 2b4649b1fbe..5f1566edcf5 100644
--- a/storage/rocksdb/build_rocksdb.cmake
+++ b/storage/rocksdb/build_rocksdb.cmake
@@ -160,7 +160,7 @@ find_package(Threads REQUIRED)
if(WIN32)
set(SYSTEM_LIBS ${SYSTEM_LIBS} Shlwapi.lib Rpcrt4.lib)
else()
- set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT} ${LIBRT} ${LIBDL})
+ set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT} ${LIBRT} ${CMAKE_DL_LIBS})
endif()
set(ROCKSDB_LIBS rocksdblib})