| field | value | date |
|---|---|---|
| author | Kentoku SHIBA <kentokushiba@gmail.com> | 2021-04-28 16:45:50 +0900 |
| committer | GitHub <noreply@github.com> | 2021-04-28 16:45:50 +0900 |
| commit | 977115add60f0f9d6258e5ebcb512a1c97492691 (patch) | |
| tree | 6c5dff26ceecebc6607a180b98b8711b88dd25f7 /storage | |
| parent | b5d4964d1e56f91a0f129e72e850ed6220c52002 (diff) | |
| parent | 4cd92143eae9b397589e5b449d1a85c43b3e4f6b (diff) | |
| download | mariadb-git-bb-10.4-MDEV-22265.tar.gz | |
Merge branch '10.4' into bb-10.4-MDEV-22265
Diffstat (limited to 'storage')
239 files changed, 21301 insertions, 3926 deletions
```diff
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index fa49b081ad1..9526306a601 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -243,6 +243,20 @@ Archive_share::Archive_share()
 }
 
+Archive_share::~Archive_share()
+{
+  DBUG_PRINT("ha_archive", ("~Archive_share: %p", this));
+  if (archive_write_open)
+  {
+    mysql_mutex_lock(&mutex);
+    (void) close_archive_writer();      // Will reset archive_write_open
+    mysql_mutex_unlock(&mutex);
+  }
+  thr_lock_delete(&lock);
+  mysql_mutex_destroy(&mutex);
+}
+
+
 ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg)
   :handler(hton, table_arg), delayed_insert(0), bulk_insert(0)
 {
@@ -676,7 +690,6 @@ int ha_archive::close(void)
     if (azclose(&archive))
       rc= 1;
   }
-
   DBUG_RETURN(rc);
 }
@@ -1547,7 +1560,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
     share->rows_recorded= 0;
     stats.auto_increment_value= 1;
     share->archive_write.auto_increment= 0;
-    my_bitmap_map *org_bitmap= tmp_use_all_columns(table, table->read_set);
+    MY_BITMAP *org_bitmap= tmp_use_all_columns(table, &table->read_set);
 
     while (!(rc= get_row(&archive, table->record[0])))
     {
@@ -1568,7 +1581,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
       }
     }
 
-    tmp_restore_column_map(table->read_set, org_bitmap);
+    tmp_restore_column_map(&table->read_set, org_bitmap);
     share->rows_recorded= (ha_rows)writer.rows;
   }
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index b9fcf10f96f..35291e469cd 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -46,19 +46,7 @@ public:
   bool dirty;             /* Flag for if a flush should occur */
   bool crashed;           /* Meta file is crashed */
   Archive_share();
-  ~Archive_share()
-  {
-    DBUG_PRINT("ha_archive", ("~Archive_share: %p",
-                              this));
-    if (archive_write_open)
-    {
-      mysql_mutex_lock(&mutex);
-      (void) close_archive_writer();
-      mysql_mutex_unlock(&mutex);
-    }
-    thr_lock_delete(&lock);
-    mysql_mutex_destroy(&mutex);
-  }
+  virtual ~Archive_share();
   int init_archive_writer();
   void close_archive_writer();
   int write_v1_metafile();
diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc
index f081dca71c3..1d2331c1a5e 100644
--- a/storage/cassandra/ha_cassandra.cc
+++ b/storage/cassandra/ha_cassandra.cc
@@ -1641,18 +1641,18 @@ int ha_cassandra::index_read_map(uchar *buf, const uchar *key,
   char *cass_key;
   int cass_key_len;
-  my_bitmap_map *old_map;
+  MY_BITMAP *old_map;
 
-  old_map= dbug_tmp_use_all_columns(table, table->read_set);
+  old_map= dbug_tmp_use_all_columns(table, &table->read_set);
 
   if (rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len))
   {
     /* We get here when making lookups like uuid_column='not-an-uuid' */
-    dbug_tmp_restore_column_map(table->read_set, old_map);
+    dbug_tmp_restore_column_map(&table->read_set, old_map);
     DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
   }
 
-  dbug_tmp_restore_column_map(table->read_set, old_map);
+  dbug_tmp_restore_column_map(&table->read_set, old_map);
 
   bool found;
   if (se->get_slice(cass_key, cass_key_len, &found))
@@ -1726,8 +1726,8 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
     cassandra_to_mariadb() calls will use field->store(...) methods, which
    require that the column is in the table->write_set
  */
-  my_bitmap_map *old_map;
-  old_map= dbug_tmp_use_all_columns(table, table->write_set);
+  MY_BITMAP *old_map;
+  old_map= dbug_tmp_use_all_columns(table, &table->write_set);
 
   /* Start with all fields being NULL */
   for (field= table->field + 1; *field; field++)
@@ -1848,7 +1848,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
   }
 
 err:
-  dbug_tmp_restore_column_map(table->write_set, old_map);
+  dbug_tmp_restore_column_map(&table->write_set, old_map);
   return res;
 }
@@ -1933,7 +1933,7 @@ void ha_cassandra::free_dynamic_row(DYNAMIC_COLUMN_VALUE **vals,
 int ha_cassandra::write_row(const uchar *buf)
 {
-  my_bitmap_map *old_map;
+  MY_BITMAP *old_map;
   int ires;
   DBUG_ENTER("ha_cassandra::write_row");
@@ -1943,7 +1943,7 @@ int ha_cassandra::write_row(const uchar *buf)
   if (!doing_insert_batch)
     se->clear_insert_buffer();
 
-  old_map= dbug_tmp_use_all_columns(table, table->read_set);
+  old_map= dbug_tmp_use_all_columns(table, &table->read_set);
 
   insert_lineno++;
@@ -1954,7 +1954,7 @@ int ha_cassandra::write_row(const uchar *buf)
   {
     my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
              rowkey_converter->field->field_name.str, insert_lineno);
-    dbug_tmp_restore_column_map(table->read_set, old_map);
+    dbug_tmp_restore_column_map(&table->read_set, old_map);
     DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
   }
   se->start_row_insert(cass_key, cass_key_len);
@@ -1977,7 +1977,7 @@ int ha_cassandra::write_row(const uchar *buf)
       free_dynamic_row(&vals, &names);
       if (rc)
       {
-        dbug_tmp_restore_column_map(table->read_set, old_map);
+        dbug_tmp_restore_column_map(&table->read_set, old_map);
         DBUG_RETURN(rc);
       }
     }
@@ -1988,7 +1988,7 @@ int ha_cassandra::write_row(const uchar *buf)
       {
         my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
                  field_converters[i]->field->field_name.str, insert_lineno);
-        dbug_tmp_restore_column_map(table->read_set, old_map);
+        dbug_tmp_restore_column_map(&table->read_set, old_map);
         DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
       }
       se->add_insert_column(field_converters[i]->field->field_name.str, 0,
@@ -1996,7 +1996,7 @@ int ha_cassandra::write_row(const uchar *buf)
     }
   }
 
-  dbug_tmp_restore_column_map(table->read_set, old_map);
+  dbug_tmp_restore_column_map(&table->read_set, old_map);
 
   bool res;
@@ -2263,8 +2263,8 @@ bool ha_cassandra::mrr_start_read()
 {
   uint key_len;
 
-  my_bitmap_map *old_map;
-  old_map= dbug_tmp_use_all_columns(table, table->read_set);
+  MY_BITMAP *old_map;
+  old_map= dbug_tmp_use_all_columns(table, &table->read_set);
 
   se->new_lookup_keys();
@@ -2288,7 +2288,7 @@ bool ha_cassandra::mrr_start_read()
     break;
   }
 
-  dbug_tmp_restore_column_map(table->read_set, old_map);
+  dbug_tmp_restore_column_map(&table->read_set, old_map);
 
   return se->multiget_slice();
 }
@@ -2366,7 +2366,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
   LEX_STRING *oldnames, *names;
   uint oldcount, count;
   String oldvalcol, valcol;
-  my_bitmap_map *old_map;
+  MY_BITMAP *old_map;
   int res;
   DBUG_ENTER("ha_cassandra::update_row");
   /* Currently, it is guaranteed that new_data == table->record[0] */
@@ -2374,7 +2374,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
   /* For now, just rewrite the full record */
   se->clear_insert_buffer();
 
-  old_map= dbug_tmp_use_all_columns(table, table->read_set);
+  old_map= dbug_tmp_use_all_columns(table, &table->read_set);
 
   char *old_key;
   int old_key_len;
@@ -2387,7 +2387,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
   {
     my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
              rowkey_converter->field->field_name.str, insert_lineno);
-    dbug_tmp_restore_column_map(table->read_set, old_map);
+    dbug_tmp_restore_column_map(&table->read_set, old_map);
     DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
   }
@@ -2450,7 +2450,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
       {
         my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
                  field_converters[i]->field->field_name.str, insert_lineno);
-        dbug_tmp_restore_column_map(table->read_set, old_map);
+        dbug_tmp_restore_column_map(&table->read_set, old_map);
         DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
       }
       se->add_insert_column(field_converters[i]->field->field_name.str, 0,
@@ -2477,7 +2477,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
     }
   }
 
-  dbug_tmp_restore_column_map(table->read_set, old_map);
+  dbug_tmp_restore_column_map(&table->read_set, old_map);
 
   res= se->do_insert();
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index 0af3a02a09d..b69d1a04f54 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -82,6 +82,19 @@ ENDIF(UNIX)
 
 #
+# BSON: the new handling of JSON data included temporarily for testing
+#
+
+OPTION(CONNECT_WITH_BSON "Compile CONNECT storage engine with BSON support" ON)
+
+IF(CONNECT_WITH_BSON)
+  SET(CONNECT_SOURCES ${CONNECT_SOURCES}
+      bson.cpp bsonudf.cpp tabbson.cpp bson.h bsonudf.h tabbson.h)
+  add_definitions(-DBSON_SUPPORT)
+ENDIF(CONNECT_WITH_BSON)
+
+
+#
 # VCT: the VEC format might be not supported in future versions
 #
@@ -318,29 +331,29 @@ ENDIF(CONNECT_WITH_MONGO)
 OPTION(CONNECT_WITH_REST "Compile CONNECT storage engine with REST support" ON)
 
 IF(CONNECT_WITH_REST)
-  MESSAGE_ONCE(CONNECT_WITH_REST "REST support is ON")
+#  MESSAGE(STATUS "=====> REST support is ON")
   SET(CONNECT_SOURCES ${CONNECT_SOURCES} tabrest.cpp tabrest.h)
   add_definitions(-DREST_SUPPORT)
-  FIND_PACKAGE(cpprestsdk QUIET)
-  IF (cpprestsdk_FOUND)
-    IF(UNIX)
-#      INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR})
-#      If needed edit next line to set the path to libcpprest.so
-      SET(REST_LIBRARY -lcpprest)
-      MESSAGE (STATUS ${REST_LIBRARY})
-    ELSE(NOT UNIX)
-#      Next line sets debug compile mode matching cpprest_2_10d.dll
-#      when it was binary installed (can be change later in Visual Studio)
-#      Comment it out if not needed depending on your cpprestsdk installation.
-      SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd")
-    ENDIF(UNIX)
-#   IF(REST_LIBRARY)      why this? how about Windows
-    SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp)
-    add_definitions(-DREST_SOURCE)
-#   ENDIF()
-  ELSE(NOT cpprestsdk_FOUND)
-#    MESSAGE(STATUS "=====> cpprestsdk package not found")
-  ENDIF (cpprestsdk_FOUND)
+#  FIND_PACKAGE(cpprestsdk QUIET)
+#  IF (cpprestsdk_FOUND)
+#    IF(UNIX)
+##      INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR})
+##      If needed edit next line to set the path to libcpprest.so
+#      SET(REST_LIBRARY -lcpprest)
+#      MESSAGE (STATUS ${REST_LIBRARY})
+#    ELSE(NOT UNIX)
+##      Next line sets debug compile mode matching cpprest_2_10d.dll
+##      when it was binary installed (can be change later in Visual Studio)
+##      Comment it out if not needed depending on your cpprestsdk installation.
+#      SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd")
+#    ENDIF(UNIX)
+##   IF(REST_LIBRARY)      why this? how about Windows
+#    SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp)
+#    add_definitions(-DREST_SOURCE)
+##   ENDIF()
+##ELSE(NOT cpprestsdk_FOUND)
+##    MESSAGE(STATUS "=====> cpprestsdk package not found")
+#  ENDIF (cpprestsdk_FOUND)
 ENDIF(CONNECT_WITH_REST)
 
 #
diff --git a/storage/connect/block.h b/storage/connect/block.h
index 2ca9586ee3f..c10fc4761ac 100644
--- a/storage/connect/block.h
+++ b/storage/connect/block.h
@@ -1,25 +1,25 @@
 /**************** Block H Declares Source Code File (.H) ***************/
-/* Name: BLOCK.H Version 2.0 */
+/* Name: BLOCK.H Version 2.1 */
 /* */
-/* (C) Copyright to the author Olivier BERTRAND 1998 */
+/* (C) Copyright to the author Olivier BERTRAND 1998 - 2020 */
 /* */
 /* This file contains the BLOCK pure virtual class definition. */
 /*---------------------------------------------------------------------*/
 /* Note: one of the main purpose of this base class is to take care */
-/* of the very specific way Plug handles memory allocation. */
+/* of the very specific way Connect handles memory allocation. */
 /* Instead of allocating small chunks of storage via new or malloc */
-/* Plug works in its private memory pool in which it does the sub- */
+/* Connect works in its private memory pool in which it does the sub- */
 /* allocation using the function PlugSubAlloc. These are never freed */
 /* separately but when a transaction is terminated, the entire pool */
 /* is set to empty, resulting in a very fast and efficient allocate */
 /* process, no garbage collection problem, and an automatic recovery */
-/* procedure (via LongJump) when the memory is exhausted. */
+/* procedure (via throw) when the memory is exhausted. */
 /* For this to work new must be given two parameters, first the */
 /* global pointer of the Plug application, and an optional pointer to */
 /* the memory pool to use, defaulting to NULL meaning using the Plug */
 /* standard default memory pool, example: */
-/* tabp = new(g) XTAB("EMPLOYEE"); */
-/* allocates a XTAB class object in the standard Plug memory pool. */
+/* tabp = new(g) XTAB("EMPLOYEE"); */
+/* allocates a XTAB class object in the standard Plug memory pool. */
 /***********************************************************************/
 #if !defined(BLOCK_DEFINED)
 #define BLOCK_DEFINED
@@ -37,21 +37,25 @@ typedef class BLOCK *PBLOCK;
 class DllExport BLOCK {
  public:
-  void * operator new(size_t size, PGLOBAL g, void *p = NULL) {
-    xtrc(256, "New BLOCK: size=%d g=%p p=%p\n", size, g, p);
-    return (PlugSubAlloc(g, p, size));
-  } // end of new
+  void *operator new(size_t size, PGLOBAL g, void *mp = NULL) {
+    xtrc(256, "New BLOCK: size=%d g=%p p=%p\n", size, g, mp);
+    return PlugSubAlloc(g, mp, size);
+  } // end of new
 
-  virtual void Printf(PGLOBAL, FILE *, uint) {}   // Produce file desc
+  void* operator new(size_t size, long long mp) {
+    xtrc(256, "Realloc at: mp=%lld\n", mp);
+    return (void*)mp;
+  } // end of new
+
+  virtual void Printf(PGLOBAL, FILE *, uint) {}   // Produce file desc
   virtual void Prints(PGLOBAL, char *, uint) {}   // Produce string desc
-#if !defined(__BORLANDC__)
-  // Avoid warning C4291 by defining a matching dummy delete operator
-  void operator delete(void *, PGLOBAL, void *) {}
-  void operator delete(void *, size_t) {}
-#endif
-  virtual ~BLOCK() {}
+  // Avoid gcc errors by defining matching dummy delete operators
+  void operator delete(void*, PGLOBAL, void *) {}
+  void operator delete(void*, long long) {}
+  void operator delete(void*) {}
 
-  }; // end of class BLOCK
+  virtual ~BLOCK() {}
+}; // end of class BLOCK
 
 #endif // !BLOCK_DEFINED
diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp
new file mode 100644
index 00000000000..3c33551cb68
--- /dev/null
+++ b/storage/connect/bson.cpp
@@ -0,0 +1,1788 @@
+/*************** bson CPP Declares Source Code File (.H) ***************/
+/* Name: bson.cpp Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* */
+/* This file contains the BJSON classes functions. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant sections of the MariaDB header file. */
+/***********************************************************************/
+#include <my_global.h>
+
+/***********************************************************************/
+/* Include application header files: */
+/* global.h is header containing all global declarations. */
+/* plgdbsem.h is header containing the DB application declarations. */
+/* bson.h is header containing the BSON classes declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "bson.h"
+
+/***********************************************************************/
+/* Check macro. */
+/***********************************************************************/
+#if defined(_DEBUG)
+#define CheckType(X,Y) if (!X || X->Type != Y) throw MSG(VALTYPE_NOMATCH);
+#else
+#define CheckType(X,Y)
+#endif
+
+#if defined(__WIN__)
+#define EL "\r\n"
+#else
+#define EL "\n"
+#undef SE_CATCH // Does not work for Linux
+#endif
+
+int GetJsonDefPrec(void);
+
+#if defined(SE_CATCH)
+/**************************************************************************/
+/* This is the support of catching C interrupts to prevent crashes. */
+/**************************************************************************/
+#include <eh.h>
+
+class SE_Exception {
+public:
+  SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {}
+  ~SE_Exception() {}
+
+  unsigned int nSE;
+  PEXCEPTION_RECORD eRec;
+}; // end of class SE_Exception
+
+void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp) {
+  throw SE_Exception(u, pExp->ExceptionRecord);
+} // end of trans_func
+
+char* GetExceptionDesc(PGLOBAL g, unsigned int e);
+#endif // SE_CATCH
+
+/* --------------------------- Class BDOC ---------------------------- */
+
+/***********************************************************************/
+/* BDOC constructor. */
+/***********************************************************************/
+BDOC::BDOC(PGLOBAL G) : BJSON(G, NULL)
+{
+  jp = NULL;
+  s = NULL;
+  len = 0;
+  pretty = 3;
+  pty[0] = pty[1] = pty[2] = true;
+  comma = false;
+} // end of BDOC constructor
+
+/***********************************************************************/
+/* Parse a json string. */
+/* Note: when pretty is not known, the caller set pretty to 3. */
+/***********************************************************************/
+PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng)
+{
+  size_t i;
+  bool b = false, ptyp = (bool *)pty;
+  PBVAL bvp = NULL;
+
+  s = js;
+  len = lng;
+  xtrc(1, "BDOC::ParseJson: s=%.10s len=%zd\n", s, len);
+
+  if (!s || !len) {
+    strcpy(g->Message, "Void JSON object");
+    return NULL;
+  } // endif s
+
+  // Trying to guess the pretty format
+  if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
+    pty[0] = false;
+
+  try {
+    bvp = NewVal();
+    bvp->Type = TYPE_UNKNOWN;
+
+    for (i = 0; i < len; i++)
+      switch (s[i]) {
+        case '[':
+          if (bvp->Type != TYPE_UNKNOWN)
+            bvp->To_Val = ParseAsArray(i);
+          else
+            bvp->To_Val = ParseArray(++i);
+
+          bvp->Type = TYPE_JAR;
+          break;
+        case '{':
+          if (bvp->Type != TYPE_UNKNOWN) {
+            bvp->To_Val = ParseAsArray(i);
+            bvp->Type = TYPE_JAR;
+          } else {
+            bvp->To_Val = ParseObject(++i);
+            bvp->Type = TYPE_JOB;
+          } // endif Type
+
+          break;
+        case ' ':
+        case '\t':
+        case '\n':
+        case '\r':
+          break;
+        case ',':
+          if (bvp->Type != TYPE_UNKNOWN && (pretty == 1 || pretty == 3)) {
+            comma = true;
+            pty[0] = pty[2] = false;
+            break;
+          } // endif pretty
+
+          sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
+          throw 3;
+        case '(':
+          b = true;
+          break;
+        case ')':
+          if (b) {
+            b = false;
+            break;
+          } // endif b
+
+        default:
+          if (bvp->Type != TYPE_UNKNOWN) {
+            bvp->To_Val = ParseAsArray(i);
+            bvp->Type = TYPE_JAR;
+          } else if ((bvp->To_Val = MOF(ParseValue(i, NewVal()))))
+            bvp->Type = TYPE_JVAL;
+          else
+            throw 4;
+
+          break;
+      }; // endswitch s[i]
+
+    if (bvp->Type == TYPE_UNKNOWN)
+      sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s);
+    else if (pretty == 3) {
+      for (i = 0; i < 3; i++)
+        if (pty[i]) {
+          pretty = i;
+          break;
+        } // endif pty
+
+    } // endif ptyp
+
+  } catch (int n) {
+    if (trace(1))
+      htrc("Exception %d: %s\n", n, G->Message);
+    GetMsg(g);
+    bvp = NULL;
+  } catch (const char* msg) {
+    strcpy(g->Message, msg);
+    bvp = NULL;
+  } // end catch
+
+  return bvp;
+} // end of ParseJson
+
+/***********************************************************************/
+/* Parse several items as being in an array. */
+/***********************************************************************/
+OFFSET BDOC::ParseAsArray(size_t& i) {
+  if (pty[0] && (!pretty || pretty > 2)) {
+    OFFSET jsp;
+
+    if ((jsp = ParseArray((i = 0))) && pretty == 3)
+      pretty = (pty[0]) ? 0 : 3;
+
+    return jsp;
+  } else
+    strcpy(G->Message, "More than one item in file");
+
+  return 0;
+} // end of ParseAsArray
+
+/***********************************************************************/
+/* Parse a JSON Array. */
+/***********************************************************************/
+OFFSET BDOC::ParseArray(size_t& i)
+{
+  int level = 0;
+  bool b = (!i);
+  PBVAL vlp, firstvlp, lastvlp;
+
+  vlp = firstvlp = lastvlp = NULL;
+
+  for (; i < len; i++)
+    switch (s[i]) {
+      case ',':
+        if (level < 2) {
+          sprintf(G->Message, "Unexpected ',' near %.*s", (int) ARGS);
+          throw 1;
+        } else
+          level = 1;
+
+        break;
+      case ']':
+        if (level == 1) {
+          sprintf(G->Message, "Unexpected ',]' near %.*s", (int) ARGS);
+          throw 1;
+        } // endif level
+
+        return MOF(firstvlp);
+      case '\n':
+        if (!b)
+          pty[0] = pty[1] = false;
+      case '\r':
+      case ' ':
+      case '\t':
+        break;
+      default:
+        if (level == 2) {
+          sprintf(G->Message, "Unexpected value near %.*s", (int) ARGS);
+          throw 1;
+        } else if (lastvlp) {
+          vlp = ParseValue(i, NewVal());
+          lastvlp->Next = MOF(vlp);
+          lastvlp = vlp;
+        } else
+          firstvlp = lastvlp = ParseValue(i, NewVal());
+
+        level = (b) ? 1 : 2;
+        break;
+    }; // endswitch s[i]
+
+  if (b) {
+    // Case of Pretty == 0
+    return MOF(firstvlp);
+  } // endif b
+
+  throw ("Unexpected EOF in array");
+} // end of ParseArray
+
+/***********************************************************************/
+/* Parse a JSON Object. */
+/***********************************************************************/
+OFFSET BDOC::ParseObject(size_t& i)
+{
+  OFFSET key;
+  int level = 0;
+  PBPR bpp, firstbpp, lastbpp;
+
+  bpp = firstbpp = lastbpp = NULL;
+
+  for (; i < len; i++)
+    switch (s[i]) {
+      case '"':
+        if (level < 2) {
+          key = ParseString(++i);
+          bpp = NewPair(key);
+
+          if (lastbpp) {
+            lastbpp->Vlp.Next = MOF(bpp);
+            lastbpp = bpp;
+          } else
+            firstbpp = lastbpp = bpp;
+
+          level = 2;
+        } else {
+          sprintf(G->Message, "misplaced string near %.*s", (int) ARGS);
+          throw 2;
+        } // endif level
+
+        break;
+      case ':':
+        if (level == 2) {
+          ParseValue(++i, GetVlp(lastbpp));
+          level = 3;
+        } else {
+          sprintf(G->Message, "Unexpected ':' near %.*s", (int) ARGS);
+          throw 2;
+        } // endif level
+
+        break;
+      case ',':
+        if (level < 3) {
+          sprintf(G->Message, "Unexpected ',' near %.*s", (int) ARGS);
+          throw 2;
+        } else
+          level = 1;
+
+        break;
+      case '}':
+        if (!(level == 0 || level == 3)) {
+          sprintf(G->Message, "Unexpected '}' near %.*s", (int) ARGS);
+          throw 2;
+        } // endif level
+
+        return MOF(firstbpp);
+      case '\n':
+        pty[0] = pty[1] = false;
+      case '\r':
+      case ' ':
+      case '\t':
+        break;
+      default:
+        sprintf(G->Message, "Unexpected character '%c' near %.*s",
+                s[i], (int) ARGS);
+        throw 2;
+    }; // endswitch s[i]
+
+  strcpy(G->Message, "Unexpected EOF in Object");
+  throw 2;
+} // end of ParseObject
+
+/***********************************************************************/
+/* Parse a JSON Value. */
+/***********************************************************************/
+PBVAL BDOC::ParseValue(size_t& i, PBVAL bvp)
+{
+  for (; i < len; i++)
+    switch (s[i]) {
+      case '\n':
+        pty[0] = pty[1] = false;
+      case '\r':
+      case ' ':
+      case '\t':
+        break;
+      default:
+        goto suite;
+    } // endswitch
+
+suite:
+  switch (s[i]) {
+    case '[':
+      bvp->To_Val = ParseArray(++i);
+      bvp->Type = TYPE_JAR;
+      break;
+    case '{':
+      bvp->To_Val = ParseObject(++i);
+      bvp->Type = TYPE_JOB;
+      break;
+    case '"':
+      bvp->To_Val = ParseString(++i);
+      bvp->Type = TYPE_STRG;
+      break;
+    case 't':
+      if (!strncmp(s + i, "true", 4)) {
+        bvp->B = true;
+        bvp->Type = TYPE_BOOL;
+        i += 3;
+      } else
+        goto err;
+
+      break;
+    case 'f':
+      if (!strncmp(s + i, "false", 5)) {
+        bvp->B = false;
+        bvp->Type = TYPE_BOOL;
+        i += 4;
+      } else
+        goto err;
+
+      break;
+    case 'n':
+      if (!strncmp(s + i, "null", 4)) {
+        bvp->Type = TYPE_NULL;
+        i += 3;
+      } else
+        goto err;
+
+      break;
+    case '-':
+    default:
+      if (s[i] == '-' || isdigit(s[i]))
+        ParseNumeric(i, bvp);
+      else
+        goto err;
+
+  }; // endswitch s[i]
+
+  return bvp;
+
+err:
+  sprintf(G->Message, "Unexpected character '%c' near %.*s", s[i], (int) ARGS);
+  throw 3;
+} // end of ParseValue
+
+/***********************************************************************/
+/* Unescape and parse a JSON string. */
+/***********************************************************************/
+OFFSET BDOC::ParseString(size_t& i)
+{
+  uchar* p;
+  int n = 0;
+
+  // Be sure of memory availability
+  if (((size_t)len + 1 - i) > ((PPOOLHEADER)G->Sarea)->FreeBlk)
+    throw("ParseString: Out of memory");
+
+  // The size to allocate is not known yet
+  p = (uchar*)BsonSubAlloc(0);
+
+  for (; i < len; i++)
+    switch (s[i]) {
+      case '"':
+        p[n++] = 0;
+        BsonSubAlloc(n);
+        return MOF(p);
+      case '\\':
+        if (++i < len) {
+          if (s[i] == 'u') {
+            if (len - i > 5) {
+              //            if (charset == utf8) {
+              char xs[5];
+              uint hex;
+
+              xs[0] = s[++i];
+              xs[1] = s[++i];
+              xs[2] = s[++i];
+              xs[3] = s[++i];
+              xs[4] = 0;
+              hex = strtoul(xs, NULL, 16);
+
+              if (hex < 0x80) {
+                p[n] = (uchar)hex;
+              } else if (hex < 0x800) {
+                p[n++] = (uchar)(0xC0 | (hex >> 6));
+                p[n] = (uchar)(0x80 | (hex & 0x3F));
+              } else if (hex < 0x10000) {
+                p[n++] = (uchar)(0xE0 | (hex >> 12));
+                p[n++] = (uchar)(0x80 | ((hex >> 6) & 0x3f));
+                p[n] = (uchar)(0x80 | (hex & 0x3f));
+              } else
+                p[n] = '?';
+
+#if 0
+            } else {
+              char xs[3];
+              UINT hex;
+
+              i += 2;
+              xs[0] = s[++i];
+              xs[1] = s[++i];
+              xs[2] = 0;
+              hex = strtoul(xs, NULL, 16);
+              p[n] = (char)hex;
+            } // endif charset
+#endif // 0
+            } else
+              goto err;
+
+          } else switch (s[i]) {
+            case 't': p[n] = '\t'; break;
+            case 'n': p[n] = '\n'; break;
+            case 'r': p[n] = '\r'; break;
+            case 'b': p[n] = '\b'; break;
+            case 'f': p[n] = '\f'; break;
+            default:  p[n] = s[i]; break;
+          } // endswitch
+
+          n++;
+        } else
+          goto err;
+
+        break;
+      default:
+        p[n++] = s[i];
+        break;
+    }; // endswitch s[i]
+
+err:
+  throw("Unexpected EOF in String");
+} // end of ParseString
+
+/***********************************************************************/
+/* Parse a JSON numeric value. */
+/***********************************************************************/
+void BDOC::ParseNumeric(size_t& i, PBVAL vlp)
+{
+  char buf[50];
+  int n = 0;
+  short nd = 0;
+  bool has_dot = false;
+  bool has_e = false;
+  bool found_digit = false;
+
+  for (; i < len; i++) {
+    switch (s[i]) {
+      case '.':
+        if (!found_digit || has_dot || has_e)
+          goto err;
+
+        has_dot = true;
+        break;
+      case 'e':
+      case 'E':
+        if (!found_digit || has_e)
+          goto err;
+
+        has_e = true;
+        found_digit = false;
+        break;
+      case '+':
+        if (!has_e)
+          goto err;
+
+        // fall through
+      case '-':
+        if (found_digit)
+          goto err;
+
+        break;
+      default:
+        if (isdigit(s[i])) {
+          if (has_dot && !has_e)
+            nd++;       // Number of decimals
+
+          found_digit = true;
+        } else
+          goto fin;
+
+    }; // endswitch s[i]
+
+    buf[n++] = s[i];
+  } // endfor i
+
+fin:
+  if (found_digit) {
+    buf[n] = 0;
+
+    if (has_dot || has_e) {
+      double dv = atof(buf);
+
+      if (nd >= 6 || dv > FLT_MAX || dv < FLT_MIN) {
+        double* dvp = (double*)PlugSubAlloc(G, NULL, sizeof(double));
+
+        *dvp = dv;
+        vlp->To_Val = MOF(dvp);
+        vlp->Type = TYPE_DBL;
+      } else {
+        vlp->F = (float)dv;
+        vlp->Type = TYPE_FLOAT;
+      } // endif nd
+
+      vlp->Nd = MY_MIN(nd, 16);
+    } else {
+      longlong iv = strtoll(buf, NULL, 10);
+
+      if (iv > INT_MAX32 || iv < INT_MIN32) {
+        longlong *llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong));
+
+        *llp = iv;
+        vlp->To_Val = MOF(llp);
+        vlp->Type = TYPE_BINT;
+      } else {
+        vlp->N = (int)iv;
+        vlp->Type = TYPE_INTG;
+      } // endif iv
+
+    } // endif has
+
+    i--;  // Unstack following character
+    return;
+  } else
+    throw("No digit found");
+
+err:
+  throw("Unexpected EOF in number");
+} // end of ParseNumeric
+
+/***********************************************************************/
+/* Serialize a BJSON document tree: */
+/***********************************************************************/
+PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty)
+{
+  PSZ str = NULL;
+  bool b = false, err = true;
+  FILE* fs = NULL;
+
+  G->Message[0] = 0;
+
+  try {
+    if (!bvp) {
+      strcpy(g->Message, "Null json tree");
+      throw 1;
+    } else if (!fn) {
+      // Serialize to a string
+      jp = new(g) JOUTSTR(g);
+      b = pretty == 1;
+    } else {
+      if (!(fs = fopen(fn, "wb"))) {
+        sprintf(g->Message, MSG(OPEN_MODE_ERROR), "w", (int)errno, fn);
+        strcat(strcat(g->Message, ": "), strerror(errno));
+        throw 2;
+      } else if (pretty >= 2) {
+        // Serialize to a pretty file
+        jp = new(g)JOUTPRT(g, fs);
+      } else {
+        // Serialize to a flat file
+        b = true;
+        jp = new(g)JOUTFILE(g, fs, pretty);
+      } // endif's
+
+    } // endif's
+
+    switch (bvp->Type) {
+      case TYPE_JAR:
+        err = SerializeArray(bvp->To_Val, b);
+        break;
+      case TYPE_JOB:
+        err = ((b && jp->Prty()) && jp->WriteChr('\t'));
+        err |= SerializeObject(bvp->To_Val);
+        break;
+      case TYPE_JVAL:
+        err = SerializeValue(MVP(bvp->To_Val));
+        break;
+      default:
+        err = SerializeValue(bvp, true);
+    } // endswitch Type
+
+    if (fs) {
+      fputs(EL, fs);
+      fclose(fs);
+      str = (err) ? NULL : strcpy(g->Message, "Ok");
+    } else if (!err) {
+      str = ((JOUTSTR*)jp)->Strp;
+      jp->WriteChr('\0');
+      PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
+    } else if (G->Message[0])
+      strcpy(g->Message, "Error in Serialize");
+    else
+      GetMsg(g);
+
+  } catch (int n) {
+    if (trace(1))
+      htrc("Exception %d: %s\n", n, G->Message);
+    GetMsg(g);
+    str = NULL;
+  } catch (const char* msg) {
+    strcpy(g->Message, msg);
+    str = NULL;
+  } // end catch
+
+  return str;
+} // end of Serialize
+
+
+/***********************************************************************/
+/* Serialize a JSON Array. */
+/***********************************************************************/
+bool BDOC::SerializeArray(OFFSET arp, bool b)
+{
+  bool first = true;
+  PBVAL vp = MVP(arp);
+
+  if (b) {
+    if (jp->Prty()) {
+      if (jp->WriteChr('['))
+        return true;
+      else if (jp->Prty() == 1 && (jp->WriteStr(EL) || jp->WriteChr('\t')))
+        return true;
+
+    } // endif Prty
+
+  } else if (jp->WriteChr('['))
+    return true;
+
+  for (vp; vp; vp = MVP(vp->Next)) {
+    if (first)
+      first = false;
+    else if ((!b || jp->Prty()) && jp->WriteChr(','))
+      return true;
+    else if (b) {
+      if (jp->Prty() < 2 && jp->WriteStr(EL))
+        return true;
+      else if (jp->Prty() == 1 && jp->WriteChr('\t'))
+        return true;
+
+    } // endif b
+
+    if (SerializeValue(vp))
+      return true;
+
+  } // endfor vp
+
+  if (b && jp->Prty() == 1 && jp->WriteStr(EL))
+    return true;
+
+  return ((!b || jp->Prty()) && jp->WriteChr(']'));
+} // end of SerializeArray
+
+/***********************************************************************/
+/* Serialize a JSON Object. */
+/***********************************************************************/
+bool BDOC::SerializeObject(OFFSET obp)
+{
+  bool first = true;
+  PBPR prp = MPP(obp);
+
+  if (jp->WriteChr('{'))
+    return true;
+
+  for (prp; prp; prp = GetNext(prp)) {
+    if (first)
+      first = false;
+    else if (jp->WriteChr(','))
+      return true;
+
+    if (jp->WriteChr('"') ||
+        jp->WriteStr(MZP(prp->Key)) ||
+        jp->WriteChr('"') ||
+        jp->WriteChr(':') ||
+        SerializeValue(GetVlp(prp)))
+      return true;
+
+  } // endfor i
+
+  return jp->WriteChr('}');
+} // end of SerializeObject
+
+/***********************************************************************/
+/* Serialize a JSON Value. */
+/***********************************************************************/
+bool BDOC::SerializeValue(PBVAL jvp, bool b)
+{
+  char buf[64];
+
+  if (jvp) switch (jvp->Type) {
+    case TYPE_JAR:
+      return SerializeArray(jvp->To_Val, false);
+    case TYPE_JOB:
+      return SerializeObject(jvp->To_Val);
+    case TYPE_BOOL:
+      return jp->WriteStr(jvp->B ? "true" : "false");
"true" : "false"); + case TYPE_STRG: + case TYPE_DTM: + if (b) { + return jp->WriteStr(MZP(jvp->To_Val)); + } else + return jp->Escape(MZP(jvp->To_Val)); + + case TYPE_INTG: + sprintf(buf, "%d", jvp->N); + return jp->WriteStr(buf); + case TYPE_BINT: + sprintf(buf, "%lld", *(longlong*)MakePtr(Base, jvp->To_Val)); + return jp->WriteStr(buf); + case TYPE_FLOAT: + sprintf(buf, "%.*f", jvp->Nd, jvp->F); + return jp->WriteStr(buf); + case TYPE_DBL: + sprintf(buf, "%.*lf", jvp->Nd, *(double*)MakePtr(Base, jvp->To_Val)); + return jp->WriteStr(buf); + case TYPE_NULL: + return jp->WriteStr("null"); + case TYPE_JVAL: + return SerializeValue(MVP(jvp->To_Val)); + default: + return jp->WriteStr("???"); // TODO + } // endswitch Type + + return jp->WriteStr("null"); +} // end of SerializeValue + +/* --------------------------- Class BJSON --------------------------- */ + +/***********************************************************************/ +/* Program for sub-allocating Bjson structures. */ +/***********************************************************************/ +void* BJSON::BsonSubAlloc(size_t size) +{ + PPOOLHEADER pph; /* Points on area header. */ + void* memp = G->Sarea; + + size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */ + pph = (PPOOLHEADER)memp; + + xtrc(16, "SubAlloc in %p size=%zd used=%zd free=%zd\n", + memp, size, pph->To_Free, pph->FreeBlk); + + if (size > pph->FreeBlk) { /* Not enough memory left in pool */ + sprintf(G->Message, + "Not enough memory for request of %zd (used=%zd free=%zd)", + size, pph->To_Free, pph->FreeBlk); + xtrc(1, "BsonSubAlloc: %s\n", G->Message); + + if (Throw) + throw(1234); + else + return NULL; + + } /* endif size OS32 code */ + + // Do the suballocation the simplest way + memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */ + pph->To_Free += size; /* New offset of pool free block */ + pph->FreeBlk -= size; /* New size of pool free block */ + xtrc(16, "Done memp=%p used=%zd free=%zd\n", + memp, pph->To_Free, pph->FreeBlk); + return memp; +} // end of BsonSubAlloc + +/*********************************************************************************/ +/* Program for SubSet re-initialization of the memory pool. */ +/*********************************************************************************/ +PSZ BJSON::NewStr(PSZ str) +{ + if (str) { + PSZ sm = (PSZ)BsonSubAlloc(strlen(str) + 1); + + strcpy(sm, str); + return sm; + } else + return NULL; + +} // end of NewStr + +/*********************************************************************************/ +/* Program for SubSet re-initialization of the memory pool. */ +/*********************************************************************************/ +void BJSON::SubSet(bool b) +{ + PPOOLHEADER pph = (PPOOLHEADER)G->Sarea; + + pph->To_Free = (G->Saved_Size) ? G->Saved_Size : sizeof(POOLHEADER); + pph->FreeBlk = G->Sarea_Size - pph->To_Free; + + if (b) + G->Saved_Size = 0; + +} // end of SubSet + +/*********************************************************************************/ +/* Set the beginning of suballocations. 
+/*********************************************************************************/
+void BJSON::MemSet(size_t size)
+{
+  PPOOLHEADER pph = (PPOOLHEADER)G->Sarea;
+
+  pph->To_Free = size + sizeof(POOLHEADER);
+  pph->FreeBlk = G->Sarea_Size - pph->To_Free;
+} // end of MemSet
+
+ /* ------------------------ Bobject functions ------------------------ */
+
+/***********************************************************************/
+/* Set a pair vlp to some PVAL values. */
+/***********************************************************************/
+void BJSON::SetPairValue(PBPR brp, PBVAL bvp)
+{
+  if (bvp) {
+    brp->Vlp.To_Val = bvp->To_Val;
+    brp->Vlp.Nd = bvp->Nd;
+    brp->Vlp.Type = bvp->Type;
+  } else {
+    brp->Vlp.To_Val = 0;
+    brp->Vlp.Nd = 0;
+    brp->Vlp.Type = TYPE_NULL;
+  } // endif bvp
+
+} // end of SetPairValue
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BPAIR. */
+/***********************************************************************/
+PBPR BJSON::NewPair(OFFSET key, int type)
+{
+  PBPR bpp = (PBPR)BsonSubAlloc(sizeof(BPAIR));
+
+  bpp->Key = key;
+  bpp->Vlp.Type = type;
+  bpp->Vlp.To_Val = 0;
+  bpp->Vlp.Nd = 0;
+  bpp->Vlp.Next = 0;
+  return bpp;
+} // end of SubAllocPair
+
+/***********************************************************************/
+/* Return the number of pairs in this object. */
+/***********************************************************************/
+int BJSON::GetObjectSize(PBVAL bop, bool b)
+{
+  CheckType(bop, TYPE_JOB);
+  int n = 0;
+
+  for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+    // If b return only non null pairs
+    if (!b || (brp->Vlp.To_Val && brp->Vlp.Type != TYPE_NULL))
+      n++;
+
+  return n;
+} // end of GetObjectSize
+
+/***********************************************************************/
+/* Add a new pair to an Object and return it. */
+/***********************************************************************/
+PBVAL BJSON::AddPair(PBVAL bop, PSZ key, int type)
+{
+  CheckType(bop, TYPE_JOB);
+  PBPR brp;
+  OFFSET nrp = NewPair(key, type);
+
+  if (bop->To_Val) {
+    for (brp = GetObject(bop); brp->Vlp.Next; brp = GetNext(brp));
+
+    brp->Vlp.Next = nrp;
+  } else
+    bop->To_Val = nrp;
+
+  bop->Nd++;
+  return GetVlp(MPP(nrp));
+} // end of AddPair
+
+/***********************************************************************/
+/* Return all object keys as an array. */
+/***********************************************************************/
+PBVAL BJSON::GetKeyList(PBVAL bop)
+{
+  CheckType(bop, TYPE_JOB);
+  PBVAL arp = NewVal(TYPE_JAR);
+
+  for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+    AddArrayValue(arp, MOF(SubAllocVal(brp->Key, TYPE_STRG)));
+
+  return arp;
+} // end of GetKeyList
+
+/***********************************************************************/
+/* Return all object values as an array. */
+/***********************************************************************/
+PBVAL BJSON::GetObjectValList(PBVAL bop)
+{
+  CheckType(bop, TYPE_JOB);
+  PBVAL arp = NewVal(TYPE_JAR);
+
+  for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+    AddArrayValue(arp, DupVal(GetVlp(brp)));
+
+  return arp;
+} // end of GetObjectValList
+
+/***********************************************************************/
+/* Get the value corresponding to the given key. */
+/***********************************************************************/
+PBVAL BJSON::GetKeyValue(PBVAL bop, PSZ key)
+{
+  CheckType(bop, TYPE_JOB);
+
+  for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+    if (!strcmp(GetKey(brp), key))
+      return GetVlp(brp);
+
+  return NULL;
+} // end of GetKeyValue;
+
+/***********************************************************************/
+/* Return the text corresponding to all keys (XML like). */
+/***********************************************************************/
+PSZ BJSON::GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text)
+{
+  CheckType(bop, TYPE_JOB);
+  PBPR brp = GetObject(bop);
+
+  if (brp) {
+    bool b;
+
+    if (!text) {
+      text = new(g) STRING(g, 256);
+      b = true;
+    } else {
+      if (text->GetLastChar() != ' ')
+        text->Append(' ');
+
+      b = false;
+    } // endif text
+
+    if (b && !brp->Vlp.Next && !strcmp(MZP(brp->Key), "$date")) {
+      int i;
+      PSZ s;
+
+      GetValueText(g, GetVlp(brp), text);
+      s = text->GetStr();
+      i = (s[1] == '-' ? 2 : 1);
+
+      if (IsNum(s + i)) {
+        // Date is in milliseconds
+        int j = text->GetLength();
+
+        if (j >= 4 + i) {
+          s[j - 3] = 0;        // Change it to seconds
+          text->SetLength((uint)strlen(s));
+        } else
+          text->Set(" 0");
+
+      } // endif text
+
+    } else for (; brp; brp = GetNext(brp)) {
+      GetValueText(g, GetVlp(brp), text);
+
+      if (brp->Vlp.Next)
+        text->Append(' ');
+
+    } // endfor brp
+
+    if (b) {
+      text->Trim();
+      return text->GetStr();
+    } // endif b
+
+  } // endif bop
+
+  return NULL;
+} // end of GetObjectText;
+
+/***********************************************************************/
+/* Set or add a value corresponding to the given key. */
+/***********************************************************************/
+void BJSON::SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key)
+{
+  CheckType(bop, TYPE_JOB);
+  PBPR brp, prp = NULL;
+
+  if (bop->To_Val) {
+    for (brp = GetObject(bop); brp; brp = GetNext(brp))
+      if (!strcmp(GetKey(brp), key))
+        break;
+      else
+        prp = brp;
+
+    if (!brp)
+      brp = MPP(prp->Vlp.Next = NewPair(key));
+
+  } else
+    brp = MPP(bop->To_Val = NewPair(key));
+
+  SetPairValue(brp, MVP(bvp));
+  bop->Nd++;
+} // end of SetKeyValue
+
+/***********************************************************************/
+/* Merge two objects. */
+/***********************************************************************/
+PBVAL BJSON::MergeObject(PBVAL bop1, PBVAL bop2)
+{
+  CheckType(bop1, TYPE_JOB);
+  CheckType(bop2, TYPE_JOB);
+
+  if (bop1->To_Val)
+    for (PBPR brp = GetObject(bop2); brp; brp = GetNext(brp))
+      SetKeyValue(bop1, GetVlp(brp), GetKey(brp));
+
+  else {
+    bop1->To_Val = bop2->To_Val;
+    bop1->Nd = bop2->Nd;
+  } // endelse To_Val
+
+  return bop1;
+} // end of MergeObject;
+
+/***********************************************************************/
+/* Delete a value corresponding to the given key. */
+/***********************************************************************/
+bool BJSON::DeleteKey(PBVAL bop, PCSZ key)
+{
+  CheckType(bop, TYPE_JOB);
+  PBPR brp, pbrp = NULL;
+
+  for (brp = GetObject(bop); brp; brp = GetNext(brp))
+    if (!strcmp(MZP(brp->Key), key)) {
+      if (pbrp) {
+        pbrp->Vlp.Next = brp->Vlp.Next;
+      } else
+        bop->To_Val = brp->Vlp.Next;
+
+      bop->Nd--;
+      return true;;
+    } else
+      pbrp = brp;
+
+  return false;
+} // end of DeleteKey
+
+/***********************************************************************/
+/* True if void or if all members are nulls. */
+/***********************************************************************/
+bool BJSON::IsObjectNull(PBVAL bop)
+{
+  CheckType(bop, TYPE_JOB);
+
+  for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+    if (brp->Vlp.To_Val && brp->Vlp.Type != TYPE_NULL)
+      return false;
+
+  return true;
+} // end of IsObjectNull
+
+/* ------------------------- Barray functions ------------------------ */
+
+/***********************************************************************/
+/* Return the number of values in this object. */
+/***********************************************************************/
+int BJSON::GetArraySize(PBVAL bap, bool b)
+{
+  CheckType(bap, TYPE_JAR);
+  int n = 0;
+
+  for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp))
+    // If b, return only non null values
+    if (!b || bvp->Type != TYPE_NULL)
+      n++;
+
+  return n;
+} // end of GetArraySize
+
+/***********************************************************************/
+/* Get the Nth value of an Array. */
+/***********************************************************************/
+PBVAL BJSON::GetArrayValue(PBVAL bap, int n)
+{
+  CheckType(bap, TYPE_JAR);
+  int i = 0;
+
+  for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++)
+    if (i == n)
+      return bvp;
+
+  return NULL;
+} // end of GetArrayValue
+
+/***********************************************************************/
+/* Add a Value to the Array Value list. */
+/***********************************************************************/
+void BJSON::AddArrayValue(PBVAL bap, OFFSET nbv, int* x)
+{
+  CheckType(bap, TYPE_JAR);
+  int i = 0;
+  PBVAL bvp, lbp = NULL;
+
+  if (!nbv)
+    nbv = MOF(NewVal());
+
+  for (bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++)
+    if (x && i == *x)
+      break;
+    else
+      lbp = bvp;
+
+  if (lbp) {
+    MVP(nbv)->Next = lbp->Next;
+    lbp->Next = nbv;
+  } else {
+    MVP(nbv)->Next = bap->To_Val;
+    bap->To_Val = nbv;
+  } // endif lbp
+
+  bap->Nd++;
+} // end of AddArrayValue
+
+/***********************************************************************/
+/* Merge two arrays. */
+/***********************************************************************/
+void BJSON::MergeArray(PBVAL bap1, PBVAL bap2)
+{
+  CheckType(bap1, TYPE_JAR);
+  CheckType(bap2, TYPE_JAR);
+
+  if (bap1->To_Val) {
+    for (PBVAL bvp = GetArray(bap2); bvp; bvp = GetNext(bvp))
+      AddArrayValue(bap1, MOF(DupVal(bvp)));
+
+  } else {
+    bap1->To_Val = bap2->To_Val;
+    bap1->Nd = bap2->Nd;
+  } // endif To_Val
+
+} // end of MergeArray
+
+/***********************************************************************/
+/* Set the nth Value of the Array Value list or add it. */
+/***********************************************************************/
+void BJSON::SetArrayValue(PBVAL bap, PBVAL nvp, int n)
+{
+  CheckType(bap, TYPE_JAR);
+  int i = 0;
+  PBVAL bvp = NULL;
+
+  if (bap->To_Val)
+    for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp))
+      if (i == n) {
+        SetValueVal(bvp, nvp);
+        return;
+      }
+
+  if (!bvp)
+    AddArrayValue(bap, MOF(nvp));
+
+} // end of SetValue
+
+/***********************************************************************/
+/* Return the text corresponding to all values. */
+/***********************************************************************/
+PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text)
+{
+  CheckType(bap, TYPE_JAR);
+
+  if (bap->To_Val) {
+    bool b;
+
+    if (!text) {
+      text = new(g) STRING(g, 256);
+      b = true;
+    } else {
+      if (text->GetLastChar() != ' ')
+        text->Append(" (");
+      else
+        text->Append('(');
+
+      b = false;
+    } // endif text
+
+    for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp)) {
+      GetValueText(g, bvp, text);
+
+      if (bvp->Next)
+        text->Append(", ");
+      else if (!b)
+        text->Append(')');
+
+    } // endfor bvp
+
+    if (b) {
+      text->Trim();
+      return text->GetStr();
+    } // endif b
+
+  } // endif To_Val
+
+  return NULL;
+} // end of GetText;
+
+/***********************************************************************/
+/* Delete a Value from the Arrays Value list. */
+/***********************************************************************/
+bool BJSON::DeleteValue(PBVAL bap, int n)
+{
+  CheckType(bap, TYPE_JAR);
+  int i = 0;
+  PBVAL bvp, pvp = NULL;
+
+  for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp))
+    if (i == n) {
+      if (pvp)
+        pvp->Next = bvp->Next;
+      else
+        bap->To_Val = bvp->Next;
+
+      bap->Nd--;
+      return true;;
+    } else
+      pvp = bvp;
+
+  return false;
+} // end of DeleteValue
+
+/***********************************************************************/
+/* True if void or if all members are nulls. */
+/***********************************************************************/
+bool BJSON::IsArrayNull(PBVAL bap)
+{
+  CheckType(bap, TYPE_JAR);
+
+  for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp))
+    if (bvp->Type != TYPE_NULL)
+      return false;
+
+  return true;
+} // end of IsNull
+
+/* ------------------------- Bvalue functions ------------------------ */
+
+/***********************************************************************/
+/* Sub-allocate and clear a BVAL. */
+/***********************************************************************/
+PBVAL BJSON::NewVal(int type)
+{
+  PBVAL bvp = (PBVAL)BsonSubAlloc(sizeof(BVAL));
+
+  bvp->To_Val = 0;
+  bvp->Nd = 0;
+  bvp->Type = type;
+  bvp->Next = 0;
+  return bvp;
+} // end of SubAllocVal
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BVAL as type. */
+/***********************************************************************/
+PBVAL BJSON::SubAllocVal(OFFSET toval, int type, short nd)
+{
+  PBVAL bvp = NewVal(type);
+
+  bvp->To_Val = toval;
+  bvp->Nd = nd;
+  return bvp;
+} // end of SubAllocVal
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BVAL as string. */
+/***********************************************************************/
+PBVAL BJSON::SubAllocStr(OFFSET toval, short nd)
+{
+  PBVAL bvp = NewVal(TYPE_STRG);
+
+  bvp->To_Val = toval;
+  bvp->Nd = nd;
+  return bvp;
+} // end of SubAllocStr
+
+/***********************************************************************/
+/* Allocate a BVALUE with a given string or numeric value. */
+/***********************************************************************/
+PBVAL BJSON::NewVal(PVAL valp)
+{
+  PBVAL vlp = NewVal();
+
+  SetValue(vlp, valp);
+  return vlp;
+} // end of SubAllocVal
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BVAL from another BVAL. */
+/***********************************************************************/
+PBVAL BJSON::DupVal(PBVAL bvlp) {
+  PBVAL bvp = NewVal();
+
+  *bvp = *bvlp;
+  bvp->Next = 0;
+  return bvp;
+} // end of DupVal
+
+/***********************************************************************/
+/* Return the size of value's value. */
+/***********************************************************************/
+int BJSON::GetSize(PBVAL vlp, bool b)
+{
+  switch (vlp->Type) {
+    case TYPE_JAR:
+      return GetArraySize(vlp);
+    case TYPE_JOB:
+      return GetObjectSize(vlp);
+    default:
+      return 1;
+  } // enswitch Type
+
+} // end of GetSize
+
+PBVAL BJSON::GetBson(PBVAL bvp)
+{
+  PBVAL bp = NULL;
+
+  switch (bvp->Type) {
+    case TYPE_JAR:
+      bp = MVP(bvp->To_Val);
+      break;
+    case TYPE_JOB:
+      bp = GetVlp(MPP(bvp->To_Val));
+      break;
+    default:
+      bp = bvp;
+      break;
+  } // endswitch Type
+
+  return bp;
+} // end of GetBson
+
+/***********************************************************************/
+/* Return the Value's as a Value struct. */
+/***********************************************************************/
+PVAL BJSON::GetValue(PGLOBAL g, PBVAL vp)
+{
+  double d;
+  PVAL valp;
+  PBVAL vlp = vp->Type == TYPE_JVAL ? MVP(vp->To_Val) : vp;
+
+  switch (vlp->Type) {
+    case TYPE_STRG:
+    case TYPE_DBL:
+    case TYPE_BINT:
+      valp = AllocateValue(g, MP(vlp->To_Val), vlp->Type, vlp->Nd);
+      break;
+    case TYPE_INTG:
+    case TYPE_BOOL:
+      valp = AllocateValue(g, vlp, vlp->Type);
+      break;
+    case TYPE_FLOAT:
+      d = (double)vlp->F;
+      valp = AllocateValue(g, &d, TYPE_DOUBLE, vlp->Nd);
+      break;
+    default:
+      valp = NULL;
+      break;
+  } // endswitch Type
+
+  return valp;
+} // end of GetValue
+
+/***********************************************************************/
+/* Return the Value's Integer value. */
+/***********************************************************************/
+int BJSON::GetInteger(PBVAL vp) {
+  int n;
+  PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+  switch (vlp->Type) {
+    case TYPE_INTG:
+      n = vlp->N;
+      break;
+    case TYPE_FLOAT:
+      n = (int)vlp->F;
+      break;
+    case TYPE_DTM:
+    case TYPE_STRG:
+      n = atoi(MZP(vlp->To_Val));
+      break;
+    case TYPE_BOOL:
+      n = (vlp->B) ? 1 : 0;
+      break;
+    case TYPE_BINT:
+      n = (int)*(longlong*)MP(vlp->To_Val);
+      break;
+    case TYPE_DBL:
+      n = (int)*(double*)MP(vlp->To_Val);
+      break;
+    default:
+      n = 0;
+  } // endswitch Type
+
+  return n;
+} // end of GetInteger
+
+/***********************************************************************/
+/* Return the Value's Big integer value. */
+/***********************************************************************/
+longlong BJSON::GetBigint(PBVAL vp) {
+  longlong lln;
+  PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+  switch (vlp->Type) {
+    case TYPE_BINT:
+      lln = *(longlong*)MP(vlp->To_Val);
+      break;
+    case TYPE_INTG:
+      lln = (longlong)vlp->N;
+      break;
+    case TYPE_FLOAT:
+      lln = (longlong)vlp->F;
+      break;
+    case TYPE_DBL:
+      lln = (longlong)*(double*)MP(vlp->To_Val);
+      break;
+    case TYPE_DTM:
+    case TYPE_STRG:
+      lln = atoll(MZP(vlp->To_Val));
+      break;
+    case TYPE_BOOL:
+      lln = (vlp->B) ? 1 : 0;
+      break;
+    default:
+      lln = 0;
+  } // endswitch Type
+
+  return lln;
+} // end of GetBigint
+
+/***********************************************************************/
+/* Return the Value's Double value. */
+/***********************************************************************/
+double BJSON::GetDouble(PBVAL vp)
+{
+  double d;
+  PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+  switch (vlp->Type) {
+    case TYPE_DBL:
+      d = *(double*)MP(vlp->To_Val);
+      break;
+    case TYPE_BINT:
+      d = (double)*(longlong*)MP(vlp->To_Val);
+      break;
+    case TYPE_INTG:
+      d = (double)vlp->N;
+      break;
+    case TYPE_FLOAT:
+      d = (double)vlp->F;
+      break;
+    case TYPE_DTM:
+    case TYPE_STRG:
+      d = atof(MZP(vlp->To_Val));
+      break;
+    case TYPE_BOOL:
+      d = (vlp->B) ? 1.0 : 0.0;
+      break;
+    default:
+      d = 0.0;
+  } // endswitch Type
+
+  return d;
+} // end of GetDouble
+
+/***********************************************************************/
+/* Return the Value's String value. */
+/***********************************************************************/
+PSZ BJSON::GetString(PBVAL vp, char* buff)
+{
+  char buf[32];
+  char* p = (buff) ? buff : buf;
+  PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+  switch (vlp->Type) {
+    case TYPE_DTM:
+    case TYPE_STRG:
+      p = MZP(vlp->To_Val);
+      break;
+    case TYPE_INTG:
+      sprintf(p, "%d", vlp->N);
+      break;
+    case TYPE_FLOAT:
+      sprintf(p, "%.*f", vlp->Nd, vlp->F);
+      break;
+    case TYPE_BINT:
+      sprintf(p, "%lld", *(longlong*)MP(vlp->To_Val));
+      break;
+    case TYPE_DBL:
+      sprintf(p, "%.*lf", vlp->Nd, *(double*)MP(vlp->To_Val));
+      break;
+    case TYPE_BOOL:
+      p = (PSZ)((vlp->B) ? "true" : "false");
+      break;
+    case TYPE_NULL:
+      p = (PSZ)"null";
+      break;
+    default:
+      p = NULL;
+  } // endswitch Type
+
+  return (p == buf) ? (PSZ)PlugDup(G, buf) : p;
+} // end of GetString
+
+/***********************************************************************/
+/* Return the Value's String value. */
+/***********************************************************************/
+PSZ BJSON::GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text)
+{
+  if (vlp->Type == TYPE_JOB)
+    return GetObjectText(g, vlp, text);
+  else if (vlp->Type == TYPE_JAR)
+    return GetArrayText(g, vlp, text);
+
+  char buff[32];
+  PSZ s = (vlp->Type == TYPE_NULL) ? NULL : GetString(vlp, buff);
+
+  if (s)
+    text->Append(s);
+  else if (GetJsonNull())
+    text->Append(GetJsonNull());
+
+  return NULL;
+} // end of GetText
+
+void BJSON::SetValueObj(PBVAL vlp, PBVAL bop)
+{
+  CheckType(bop, TYPE_JOB);
+  vlp->To_Val = bop->To_Val;
+  vlp->Nd = bop->Nd;
+  vlp->Type = TYPE_JOB;
+} // end of SetValueObj;
+
+void BJSON::SetValueArr(PBVAL vlp, PBVAL bap)
+{
+  CheckType(bap, TYPE_JAR);
+  vlp->To_Val = bap->To_Val;
+  vlp->Nd = bap->Nd;
+  vlp->Type = TYPE_JAR;
+} // end of SetValue;
+
+void BJSON::SetValueVal(PBVAL vlp, PBVAL vp)
+{
+  vlp->To_Val = vp->To_Val;
+  vlp->Nd = vp->Nd;
+  vlp->Type = vp->Type;
+} // end of SetValue;
+
+PBVAL BJSON::SetValue(PBVAL vlp, PVAL valp)
+{
+  if (!vlp)
+    vlp = NewVal();
+
+  if (!valp || valp->IsNull()) {
+    vlp->Type = TYPE_NULL;
+  } else switch (valp->GetType()) {
+    case TYPE_DATE:
+      if (((DTVAL*)valp)->IsFormatted())
+        vlp->To_Val = DupStr(valp->GetCharValue());
+      else {
+        char buf[32];
+
+        vlp->To_Val = DupStr(valp->GetCharString(buf));
+      } // endif Formatted
+
+      vlp->Type = TYPE_DTM;
+      break;
+    case TYPE_STRING:
+      vlp->To_Val = DupStr(valp->GetCharValue());
+      vlp->Type = TYPE_STRG;
+      break;
+    case TYPE_DOUBLE:
+    case TYPE_DECIM:
+    { double d = valp->GetFloatValue();
+      int nd = (IsTypeNum(valp->GetType())) ? valp->GetValPrec() : 0;
+
+      if (nd > 0 && nd <= 6 && d >= FLT_MIN && d <= FLT_MAX) {
+        vlp->F = (float)valp->GetFloatValue();
+        vlp->Type = TYPE_FLOAT;
+      } else {
+        double* dp = (double*)BsonSubAlloc(sizeof(double));
+
+        *dp = d;
+        vlp->To_Val = MOF(dp);
+        vlp->Type = TYPE_DBL;
+      } // endif Nd
+
+      vlp->Nd = MY_MIN(nd, 16);
+    } break;
+    case TYPE_TINY:
+      vlp->B = valp->GetTinyValue() != 0;
+      vlp->Type = TYPE_BOOL;
+      break;
+    case TYPE_INT:
+      vlp->N = valp->GetIntValue();
+      vlp->Type = TYPE_INTG;
+      break;
+    case TYPE_BIGINT:
+      if (valp->GetBigintValue() >= INT_MIN32 &&
+          valp->GetBigintValue() <= INT_MAX32) {
+        vlp->N = valp->GetIntValue();
+        vlp->Type = TYPE_INTG;
+      } else {
+        longlong* llp = (longlong*)BsonSubAlloc(sizeof(longlong));
+
+        *llp = valp->GetBigintValue();
+        vlp->To_Val = MOF(llp);
+        vlp->Type = TYPE_BINT;
+      } // endif BigintValue
+
+      break;
+    default:
+      sprintf(G->Message, "Unsupported typ %d\n", valp->GetType());
+      throw(777);
+  } // endswitch Type
+
+  return vlp;
+} // end of SetValue
+
+/***********************************************************************/
+/* Set the Value's value as the given integer. */
+/***********************************************************************/
+void BJSON::SetInteger(PBVAL vlp, int n)
+{
+  vlp->N = n;
+  vlp->Type = TYPE_INTG;
+} // end of SetInteger
+
+/***********************************************************************/
+/* Set the Value's Boolean value as a tiny integer. */
+/***********************************************************************/
+void BJSON::SetBool(PBVAL vlp, bool b)
+{
+  vlp->B = b;
+  vlp->Type = TYPE_BOOL;
+} // end of SetTiny
+
+/***********************************************************************/
+/* Set the Value's value as the given big integer. */
+/***********************************************************************/
+void BJSON::SetBigint(PBVAL vlp, longlong ll)
+{
+  if (ll >= INT_MIN32 && ll <= INT_MAX32) {
+    vlp->N = (int)ll;
+    vlp->Type = TYPE_INTG;
+  } else {
+    longlong* llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong));
+
+    *llp = ll;
+    vlp->To_Val = MOF(llp);
+    vlp->Type = TYPE_BINT;
+  } // endif ll
+
+} // end of SetBigint
+
+/***********************************************************************/
+/* Set the Value's value as the given DOUBLE. */
+/***********************************************************************/
+void BJSON::SetFloat(PBVAL vlp, double d, int prec)
+{
+  int nd = MY_MIN((prec < 0) ? GetJsonDefPrec() : prec, 16);
+
+  if (nd < 6 && d >= FLT_MIN && d <= FLT_MAX) {
+    vlp->F = (float)d;
+    vlp->Type = TYPE_FLOAT;
+  } else {
+    double* dp = (double*)BsonSubAlloc(sizeof(double));
+
+    *dp = d;
+    vlp->To_Val = MOF(dp);
+    vlp->Type = TYPE_DBL;
+  } // endif nd
+
+  vlp->Nd = nd;
+} // end of SetFloat
+
+/***********************************************************************/
+/* Set the Value's value as the given DOUBLE representation. */
+/***********************************************************************/
+void BJSON::SetFloat(PBVAL vlp, PSZ s)
+{
+  char *p = strchr(s, '.');
+  int nd = 0;
+  double d = atof(s);
+
+  if (p) {
+    for (++p; isdigit(*p); nd++, p++);
+    for (--p; *p == '0'; nd--, p--);
+  } // endif p
+
+  SetFloat(vlp, d, nd);
+} // end of SetFloat
+
+ /***********************************************************************/
+/* Set the Value's value as the given string. */
*/
+/***********************************************************************/
+void BJSON::SetString(PBVAL vlp, PSZ s, int ci)
+{
+  vlp->To_Val = MOF(s);
+  vlp->Nd = ci;
+  vlp->Type = TYPE_STRG;
+} // end of SetString
+
+/***********************************************************************/
+/* True when its JSON or normal value is null. */
+/***********************************************************************/
+bool BJSON::IsValueNull(PBVAL vlp)
+{
+  bool b;
+
+  switch (vlp->Type) {
+  case TYPE_NULL:
+    b = true;
+    break;
+  case TYPE_JOB:
+    b = IsObjectNull(vlp);
+    break;
+  case TYPE_JAR:
+    b = IsArrayNull(vlp);
+    break;
+  default:
+    b = false;
+  } // endswitch Type
+
+  return b;
+} // end of IsValueNull
diff --git a/storage/connect/bson.h b/storage/connect/bson.h
new file mode 100644
index 00000000000..acc36e8e0ed
--- /dev/null
+++ b/storage/connect/bson.h
@@ -0,0 +1,207 @@
+/**************** bson H Declares Source Code File (.H) ****************/
+/* Name: bson.h   Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND          2020 */
+/* */
+/* This file contains the BSON class declarations. */
+/***********************************************************************/
+#pragma once
+#include <mysql_com.h>
+#include "json.h"
+#include "xobject.h"
+
+#if defined(_DEBUG)
+#define X  assert(false);
+#else
+#define X
+#endif
+
+#define ARGS  MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0)
+
+class BDOC;
+class BOUT;
+class BJSON;
+
+typedef class BDOC* PBDOC;
+typedef class BJSON* PBJSON;
+typedef uint OFFSET;
+
+/***********************************************************************/
+/* Structure BVAL. Binary representation of a JVALUE. */
+/***********************************************************************/
+typedef struct _jvalue {
+  union {
+    OFFSET To_Val;  // Offset to a value
+    int    N;       // An integer value
+    float  F;       // A float value
+    bool   B;       // A boolean value True or false (0)
+  };
+  short  Nd;        // Number of decimals
+  short  Type;      // The value type
+  OFFSET Next;      // Offset to the next value in array
+} BVAL, *PBVAL;     // end of struct BVALUE
+
+/***********************************************************************/
+/* Structure BPAIR. The pairs of a json Object. */
+/***********************************************************************/
+typedef struct _jpair {
+  OFFSET Key;       // Offset to this pair key name
+  BVAL   Vlp;       // The value of the pair
+} BPAIR, *PBPR;     // end of struct BPAIR
+
+char* NextChr(PSZ s, char sep);
+char* GetJsonNull(void);
+const char* GetFmt(int type, bool un);
+
+DllExport bool IsNum(PSZ s);
+
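BVAL and BPAIR deliberately store OFFSETs rather than pointers: every node lives in one PGLOBAL work area, so a whole document can be saved, copied, or rebased and still resolve. A standalone sketch of that addressing scheme follows; these MakeOff/MakePtr stand-ins are simplified, the real helpers live in the CONNECT plugin utilities.

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

using OFFSET = uint32_t;

// Simplified stand-ins for the MakeOff/MakePtr helpers wrapped by the
// MOF()/MP() inlines below: offsets are relative to the arena base.
static OFFSET MakeOff(void* base, void* p) {
  return (OFFSET)((char*)p - (char*)base);
}

static void* MakePtr(void* base, OFFSET o) {
  return (char*)base + o;
}

int main() {
  char arena[256] = {};
  char* s = arena + 64;
  std::strcpy(s, "hello");
  OFFSET off = MakeOff(arena, s);   // a BVAL would store this, not `s`

  char moved[256];                  // relocate the whole arena...
  std::memcpy(moved, arena, sizeof arena);
  std::cout << (const char*)MakePtr(moved, off) << '\n';  // ...still "hello"
}
```

+/***********************************************************************/
+/* Class BJSON. The class handling all BJSON operations.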
*/ +/***********************************************************************/ +class BJSON : public BLOCK { +public: + // Constructor + BJSON(PGLOBAL g, PBVAL vp = NULL) + { G = g, Base = G->Sarea; Bvp = vp; Throw = true; } + + // Utility functions + inline OFFSET MOF(void *p) {return MakeOff(Base, p);} + inline void *MP(OFFSET o) {return MakePtr(Base, o);} + inline PBPR MPP(OFFSET o) {return (PBPR)MakePtr(Base, o);} + inline PBVAL MVP(OFFSET o) {return (PBVAL)MakePtr(Base, o);} + inline PSZ MZP(OFFSET o) {return (PSZ)MakePtr(Base, o);} + inline longlong LLN(OFFSET o) {return *(longlong*)MakePtr(Base, o);} + inline double DBL(OFFSET o) {return *(double*)MakePtr(Base, o);} + + void Reset(void) {Base = G->Sarea;} + void* GetBase(void) { return Base; } + void SubSet(bool b = false); + void MemSave(void) {G->Saved_Size = ((PPOOLHEADER)G->Sarea)->To_Free;} + void MemSet(size_t size); + void GetMsg(PGLOBAL g) { if (g != G) strcpy(g->Message, G->Message); } + + // SubAlloc functions + void* BsonSubAlloc(size_t size); + PBPR NewPair(OFFSET key, int type = TYPE_NULL); + OFFSET NewPair(PSZ key, int type = TYPE_NULL) + {return MOF(NewPair(DupStr(key), type));} + PBVAL NewVal(int type = TYPE_NULL); + PBVAL NewVal(PVAL valp); + PBVAL SubAllocVal(OFFSET toval, int type = TYPE_NULL, short nd = 0); + PBVAL SubAllocVal(PBVAL toval, int type = TYPE_NULL, short nd = 0) + {return SubAllocVal(MOF(toval), type, nd);} + PBVAL SubAllocStr(OFFSET str, short nd = 0); + PBVAL SubAllocStr(PSZ str, short nd = 0) + {return SubAllocStr(DupStr(str), nd);} + PBVAL DupVal(PBVAL bvp); + OFFSET DupStr(PSZ str) { return MOF(NewStr(str)); } + PSZ NewStr(PSZ str); + + // Array functions + inline PBVAL GetArray(PBVAL vlp) {return MVP(vlp->To_Val);} + int GetArraySize(PBVAL bap, bool b = false); + PBVAL GetArrayValue(PBVAL bap, int i); + PSZ GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text); + void MergeArray(PBVAL bap1,PBVAL bap2); + bool DeleteValue(PBVAL bap, int n); + void AddArrayValue(PBVAL bap, OFFSET nvp = 0, int* x = NULL); + inline void AddArrayValue(PBVAL bap, PBVAL nvp = NULL, int* x = NULL) + {AddArrayValue(bap, MOF(nvp), x);} + void SetArrayValue(PBVAL bap, PBVAL nvp, int n); + bool IsArrayNull(PBVAL bap); + + // Object functions + inline PBPR GetObject(PBVAL bop) {return MPP(bop->To_Val);} + inline PBPR GetNext(PBPR brp) { return MPP(brp->Vlp.Next); } + void SetPairValue(PBPR brp, PBVAL bvp); + int GetObjectSize(PBVAL bop, bool b = false); + PSZ GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text); + PBVAL MergeObject(PBVAL bop1, PBVAL bop2); + PBVAL AddPair(PBVAL bop, PSZ key, int type = TYPE_NULL); + PSZ GetKey(PBPR prp) {return prp ? MZP(prp->Key) : NULL;} + PBVAL GetTo_Val(PBPR prp) {return prp ? MVP(prp->Vlp.To_Val) : NULL;} + PBVAL GetVlp(PBPR prp) {return prp ? (PBVAL)&prp->Vlp : NULL;} + PBVAL GetKeyValue(PBVAL bop, PSZ key); + PBVAL GetKeyList(PBVAL bop); + PBVAL GetObjectValList(PBVAL bop); + void SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key); + inline void SetKeyValue(PBVAL bop, PBVAL vlp, PSZ key) + {SetKeyValue(bop, MOF(vlp), key);} + bool DeleteKey(PBVAL bop, PCSZ k); + bool IsObjectNull(PBVAL bop); + + // Value functions + int GetSize(PBVAL vlp, bool b = false); + PBVAL GetNext(PBVAL vlp) {return MVP(vlp->Next);} + //PJSON GetJsp(void) { return (DataType == TYPE_JSON ? 
Jsp : NULL); }
+  PSZ   GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text);
+  PBVAL GetBson(PBVAL bvp);
+  PSZ   GetString(PBVAL vp, char* buff = NULL);
+  int   GetInteger(PBVAL vp);
+  long long GetBigint(PBVAL vp);
+  double GetDouble(PBVAL vp);
+  PVAL  GetValue(PGLOBAL g, PBVAL vp);
+  void  SetValueObj(PBVAL vlp, PBVAL bop);
+  void  SetValueArr(PBVAL vlp, PBVAL bap);
+  void  SetValueVal(PBVAL vlp, PBVAL vp);
+  PBVAL SetValue(PBVAL vlp, PVAL valp);
+  void  SetString(PBVAL vlp, PSZ s, int ci = 0);
+  void  SetInteger(PBVAL vlp, int n);
+  void  SetBigint(PBVAL vlp, longlong ll);
+  void  SetFloat(PBVAL vlp, double f, int nd = -1);
+  void  SetFloat(PBVAL vlp, PSZ s);
+  void  SetBool(PBVAL vlp, bool b);
+  void  Clear(PBVAL vlp) { vlp->N = 0; vlp->Nd = 0; vlp->Next = 0; }
+  bool  IsValueNull(PBVAL vlp);
+  bool  IsJson(PBVAL vlp) {return vlp ? vlp->Type == TYPE_JAR ||
+                                        vlp->Type == TYPE_JOB ||
+                                        vlp->Type == TYPE_JVAL : false;}
+
+  // Members
+  PGLOBAL G;
+  PBVAL   Bvp;
+  void   *Base;
+  bool    Throw;
+
+protected:
+  // Default constructor not to be used
+  BJSON(void) {}
+}; // end of class BJSON
+
+/***********************************************************************/
+/* Class BDOC. The class for parsing and serializing json documents. */
+/***********************************************************************/
+class BDOC : public BJSON {
+public:
+  BDOC(PGLOBAL G);
+
+  bool GetComma(void) { return comma; }
+  int  GetPretty(void) { return pretty; }
+  void SetPretty(int pty) { pretty = pty; }
+
+  // Methods
+  PBVAL ParseJson(PGLOBAL g, char* s, size_t n);
+  PSZ   Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty);
+
+protected:
+  OFFSET ParseArray(size_t& i);
+  OFFSET ParseObject(size_t& i);
+  PBVAL  ParseValue(size_t& i, PBVAL bvp);
+  OFFSET ParseString(size_t& i);
+  void   ParseNumeric(size_t& i, PBVAL bvp);
+  OFFSET ParseAsArray(size_t& i);
+  bool   SerializeArray(OFFSET arp, bool b);
+  bool   SerializeObject(OFFSET obp);
+  bool   SerializeValue(PBVAL vp, bool b = false);
+
+  // Members used when parsing and serializing
+  JOUT*  jp;      // Used with serialize
+  char*  s;       // The Json string to parse
+  size_t len;     // The Json string length
+  int    pretty;  // The pretty style of the file to parse
+  bool   pty[3];  // Used to guess what pretty is
+  bool   comma;   // True if Pretty = 1
+
+  // Default constructor not to be used
+  BDOC(void) {}
+}; // end of class BDOC
diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp
new file mode 100644
index 00000000000..29fe0a6bf22
--- /dev/null
+++ b/storage/connect/bsonudf.cpp
@@ -0,0 +1,6245 @@
+/****************** bsonudf C++ Program Source Code File (.CPP) ******************/
+/* PROGRAM NAME: bsonudf   Version 1.0 */
+/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */
+/* This program contains the BSON User Defined Functions. */
+/*********************************************************************************/
+
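Before the UDF code, a hedged sketch of driving the BDOC API declared in bson.h above: parse a literal, then print it back. Error handling is elided, and PlugInit/PlugExit are assumed as the work-area allocator pair (they are used the same way elsewhere in this file).

```cpp
#include <cstdio>
#include <cstring>
#include "bson.h"

// Sketch only: parse a JSON literal with BDOC and serialize it again.
void bdoc_round_trip(void)
{
  PGLOBAL g = PlugInit(NULL, 1 << 20);  // 1MB work area (assumption)
  BDOC    doc(g);
  char    text[] = "{\"name\":\"foo\",\"list\":[1,2,3]}";
  PBVAL   top = doc.ParseJson(g, text, strlen(text));

  if (top)
    printf("%s\n", doc.Serialize(g, top, NULL, 0)); // pretty = 0: one line
  else
    printf("parse error: %s\n", g->Message);

  PlugExit(g);                          // release the work area
}
```

+/*********************************************************************************/
+/* Include relevant sections of the MariaDB header file.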
*/ +/*********************************************************************************/ +#include <my_global.h> +#include <mysqld.h> +#include <mysql.h> +#include <sql_error.h> +#include <stdio.h> + +#include "bsonudf.h" + +#if defined(UNIX) || defined(UNIV_LINUX) +#define _O_RDONLY O_RDONLY +#endif + +#define MEMFIX 4096 +#if defined(connect_EXPORTS) +#define PUSH_WARNING(M) push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0, M) +#else +#define PUSH_WARNING(M) htrc(M) +#endif +#define M 6 + +int JsonDefPrec = -1; +int GetDefaultPrec(void); +int IsArgJson(UDF_ARGS* args, uint i); +void SetChanged(PBSON bsp); +int GetJsonDefPrec(void); + +static PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp); + +/* --------------------------------- JSON UDF ---------------------------------- */ + +/*********************************************************************************/ +/* Replaces GetJsonGrpSize not usable when CONNECT is not installed. */ +/*********************************************************************************/ +int GetJsonDefPrec(void) { + return (JsonDefPrec < 0) ? GetDefaultPrec() : JsonDefPrec; +} /* end of GetJsonDefPrec */ + +/*********************************************************************************/ +/* Program for saving the status of the memory pools. */ +/*********************************************************************************/ +inline void JsonMemSave(PGLOBAL g) { + g->Saved_Size = ((PPOOLHEADER)g->Sarea)->To_Free; +} /* end of JsonMemSave */ + +/*********************************************************************************/ +/* Program for freeing the memory pools. */ +/*********************************************************************************/ +inline void JsonFreeMem(PGLOBAL g) { + g->Activityp = NULL; + g = PlugExit(g); +} /* end of JsonFreeMem */ + +/*********************************************************************************/ +/* Allocate and initialize a BSON structure. */ +/*********************************************************************************/ +static PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp) +{ + PBSON bsp = (PBSON)PlgDBSubAlloc(g, NULL, sizeof(BSON)); + + if (bsp) { + strcpy(bsp->Msg, "Binary Json"); + bsp->Msg[BMX] = 0; + bsp->Filename = NULL; + bsp->G = g; + bsp->Pretty = 2; + bsp->Reslen = len; + bsp->Changed = false; + bsp->Top = bsp->Jsp = (PJSON)jsp; + bsp->Bsp = NULL; + } else + PUSH_WARNING(g->Message); + + return bsp; +} /* end of BbinAlloc */ + +/* --------------------------- New Testing BJSON Stuff --------------------------*/ + +/*********************************************************************************/ +/* SubAlloc a new BJNX class with protection against memory exhaustion. */ +/*********************************************************************************/ +static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len) +{ + PBJNX bjnx; + + try { + bjnx = new(g) BJNX(g, vlp, type, len); + } catch (...) { + if (trace(1023)) + htrc("%s\n", g->Message); + + PUSH_WARNING(g->Message); + bjnx = NULL; + } // end try/catch + + return bjnx; +} /* end of BjnxNew */ + +/* ----------------------------------- BSNX ------------------------------------ */ + +/*********************************************************************************/ +/* BSNX public constructor. 
*/ +/*********************************************************************************/ +BJNX::BJNX(PGLOBAL g) : BDOC(g) +{ + Row = NULL; + Bvalp = NULL; + Jpnp = NULL; + Jp = NULL; + Nodes = NULL; + Value = NULL; + MulVal = NULL; + Jpath = NULL; + Buf_Type = TYPE_STRING; + Long = len; + Prec = 0; + Nod = 0; + Xnod = -1; + K = 0; + I = -1; + Imax = 9; + B = 0; + Xpd = false; + Parsed = false; + Found = false; + Wr = false; + Jb = false; + Changed = false; + Throw = false; +} // end of BJNX constructor + +/*********************************************************************************/ +/* BSNX public constructor. */ +/*********************************************************************************/ +BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) : BDOC(g) +{ + Row = row; + Bvalp = NULL; + Jpnp = NULL; + Jp = NULL; + Nodes = NULL; + Value = AllocateValue(g, type, len, prec); + MulVal = NULL; + Jpath = NULL; + Buf_Type = type; + Long = len; + Prec = prec; + Nod = 0; + Xnod = -1; + K = 0; + I = -1; + Imax = 9; + B = 0; + Xpd = false; + Parsed = false; + Found = false; + Wr = wr; + Jb = false; + Changed = false; + Throw = false; +} // end of BJNX constructor + +/*********************************************************************************/ +/* SetJpath: set and parse the json path. */ +/*********************************************************************************/ +my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb) +{ + // Check Value was allocated + if (Value) + Value->SetNullable(true); + + Jpath = path; + + // Parse the json path + Parsed = false; + Nod = 0; + Jb = jb; + return ParseJpath(g); +} // end of SetJpath + +/*********************************************************************************/ +/* Analyse array processing options. 
*/ +/*********************************************************************************/ +my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) +{ + int n = (int)strlen(p); + my_bool dg = true, b = false; + PJNODE jnp = &Nodes[i]; + + if (*p) { + if (p[n - 1] == ']') { + p[--n] = 0; + } else if (!IsNum(p)) { + // Wrong array specification + sprintf(g->Message, "Invalid array specification %s", p); + return true; + } // endif p + + } else + b = true; + + // To check whether a numeric Rank was specified + dg = IsNum(p); + + if (!n) { + // Default specifications + if (jnp->Op != OP_EXP) { + if (Wr) { + // Force append + jnp->Rank = INT_MAX32; + jnp->Op = OP_LE; + } else if (Jb) { + // Return a Json item + jnp->Op = OP_XX; + } else if (b) { + // Return 1st value (B is the index base) + jnp->Rank = B; + jnp->Op = OP_LE; + } else if (!Value->IsTypeNum()) { + jnp->CncVal = AllocateValue(g, PlugDup(g, ", "), TYPE_STRING); + jnp->Op = OP_CNC; + } else + jnp->Op = OP_ADD; + + } // endif OP + + } else if (dg) { + // Return nth value + jnp->Rank = atoi(p) - B; + jnp->Op = OP_EQ; + } else if (Wr) { + sprintf(g->Message, "Invalid specification %s in a write path", p); + return true; + } else if (n == 1) { + // Set the Op value; + switch (*p) { + case '+': jnp->Op = OP_ADD; break; + case 'x': jnp->Op = OP_MULT; break; + case '>': jnp->Op = OP_MAX; break; + case '<': jnp->Op = OP_MIN; break; + case '!': jnp->Op = OP_SEP; break; // Average + case '#': jnp->Op = OP_NUM; break; + case '*': jnp->Op = OP_EXP; break; + default: + sprintf(g->Message, "Invalid function specification %c", *p); + return true; + } // endswitch *p + + } else if (*p == '"' && p[n - 1] == '"') { + // This is a concat specification + jnp->Op = OP_CNC; + + if (n > 2) { + // Set concat intermediate string + p[n - 1] = 0; + + if (trace(1)) + htrc("Concat string=%s\n", p + 1); + + jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING); + } // endif n + + } else { + strcpy(g->Message, "Wrong array specification"); + return true; + } // endif's + +#if 0 + // For calculated arrays, a local Value must be used + switch (jnp->Op) { + case OP_NUM: + jnp->Valp = AllocateValue(g, TYPE_INT); + break; + case OP_ADD: + case OP_MULT: + case OP_SEP: + if (!IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2); + + break; + case OP_MIN: + case OP_MAX: + jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision()); + break; + case OP_CNC: + if (IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_STRING, 512); + + break; + default: + break; + } // endswitch Op + + if (jnp->Valp) + MulVal = AllocateValue(g, jnp->Valp); +#endif // 0 + + return false; +} // end of SetArrayOptions + +/*********************************************************************************/ +/* Parse the eventual passed Jpath information. */ +/* This information can be specified in the Fieldfmt column option when */ +/* creating the table. It permits to indicate the position of the node */ +/* corresponding to that column. 
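+*/
+/*********************************************************************************/

For reference, a hedged summary of what the bracket specifications parsed by SetArrayOptions above select, plus a walk-through of ParseJpath on a sample path. The table is read directly off the switch above; the sample Nodes layout assumes index base B = 0.

```cpp
// Bracket specifications recognized by SetArrayOptions (B = index base):
//
//   [3]      OP_EQ    the element of rank 3 - B
//   [+]      OP_ADD   sum of the elements
//   [x]      OP_MULT  product of the elements
//   [>]      OP_MAX   largest element
//   [<]      OP_MIN   smallest element
//   [!]      OP_SEP   average of the elements
//   [#]      OP_NUM   number of elements
//   [*]      OP_EXP   expand, where the calling function supports it
//   ["; "]   OP_CNC   concatenation, using "; " as separator
//
// An empty [] selects OP_CNC (", ") for character columns and OP_ADD for
// numeric ones; in a write path the default becomes an append (OP_LE with
// rank INT_MAX32).
//
// ParseJpath("$.store.book[1].title") below then fills Nodes as:
//
//   Nodes[0]: Key="store"  Op=OP_EXIST
//   Nodes[1]: Key="book"   Op=OP_EXIST
//   Nodes[2]: Key=NULL     Op=OP_EQ    Rank=1
//   Nodes[3]: Key="title"  Op=OP_EXIST
```

+/*********************************************************************************/
+/* ParseJpath: parse the Jpath information described above into the Nodes array.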
*/ +/*********************************************************************************/ +my_bool BJNX::ParseJpath(PGLOBAL g) +{ + char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL; + int i; + my_bool a, mul = false; + + if (Parsed) + return false; // Already done + else if (!Jpath) + // Jpath = Name; + return true; + + if (trace(1)) + htrc("ParseJpath %s\n", SVP(Jpath)); + + if (!(pbuf = PlgDBDup(g, Jpath))) + return true; + + if (*pbuf == '$') pbuf++; + if (*pbuf == '.') pbuf++; + if (*pbuf == '[') p1 = pbuf++; + + // Estimate the required number of nodes + for (i = 0, p = pbuf; (p = NextChr(p, '.')); i++, p++) + Nod++; // One path node found + + if (!(Nodes = (PJNODE)PlgDBSubAlloc(g, NULL, (++Nod) * sizeof(JNODE)))) + return true; + + memset(Nodes, 0, (Nod) * sizeof(JNODE)); + + // Analyze the Jpath for this column + for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) { + a = (p1 != NULL); + p1 = strchr(p, '['); + p2 = strchr(p, '.'); + + if (!p2) + p2 = p1; + else if (p1) { + if (p1 < p2) + p2 = p1; + else if (p1 == p2 + 1) + *p2++ = 0; // Old syntax .[ + else + p1 = NULL; + + } // endif p1 + + if (p2) + *p2++ = 0; + + // Jpath must be explicit + if (a || *p == 0 || *p == '[' || IsNum(p)) { + // Analyse intermediate array processing + if (SetArrayOptions(g, p, i, Nodes[i - 1].Key)) + return true; + + } else if (*p == '*') { + if (Wr) { + sprintf(g->Message, "Invalid specification %c in a write path", *p); + return true; + } else // Return JSON + Nodes[i].Op = OP_XX; + + } else { + Nodes[i].Key = p; + Nodes[i].Op = OP_EXIST; + } // endif's + + } // endfor i, p + + Nod = i; +//MulVal = AllocateValue(g, Value); + + if (trace(1)) + for (i = 0; i < Nod; i++) + htrc("Node(%d) Key=%s Op=%d Rank=%d\n", + i, SVP(Nodes[i].Key), Nodes[i].Op, Nodes[i].Rank); + + Parsed = true; + return false; +} // end of ParseJpath + +/*********************************************************************************/ +/* Make a valid key from the passed argument. */ +/*********************************************************************************/ +PSZ BJNX::MakeKey(UDF_ARGS *args, int i) +{ + if (args->arg_count > (unsigned)i) { + int j = 0, n = args->attribute_lengths[i]; + my_bool b; // true if attribute is zero terminated + PSZ p; + PCSZ s = args->attributes[i]; + + if (s && *s && (n || *s == '\'')) { + if ((b = (!n || !s[n]))) + n = strlen(s); + + if (IsArgJson(args, i)) + j = (int)(strchr(s, '_') - s + 1); + + if (j && n > j) { + s += j; + n -= j; + } else if (*s == '\'' && s[n-1] == '\'') { + s++; + n -= 2; + b = false; + } // endif *s + + if (n < 1) + return NewStr((PSZ)"Key"); + + if (!b) { + p = (PSZ)BsonSubAlloc(n + 1); + memcpy(p, s, n); + p[n] = 0; + return p; + } // endif b + + } // endif s + + return NewStr((PSZ)s); + } // endif count + + return NewStr((PSZ)"Key"); +} // end of MakeKey + +/*********************************************************************************/ +/* MakeJson: Make the Json tree to serialize. 
*/ +/*********************************************************************************/ +PBVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp, int n) +{ + PBVAL vlp, jvp = bvp; + + if (n < Nod -1) { + if (bvp->Type == TYPE_JAR) { + int ars = GetArraySize(bvp); + PJNODE jnp = &Nodes[n]; + + jvp = NewVal(TYPE_JAR); + jnp->Op = OP_EQ; + + for (int i = 0; i < ars; i++) { + jnp->Rank = i; + vlp = GetRowValue(g, bvp, n); + AddArrayValue(jvp, DupVal(vlp)); + } // endfor i + + jnp->Op = OP_XX; + jnp->Rank = 0; + } else if(bvp->Type == TYPE_JOB) { + jvp = NewVal(TYPE_JOB); + + for (PBPR prp = GetObject(bvp); prp; prp = GetNext(prp)) { + vlp = GetRowValue(g, GetVlp(prp), n + 1); + SetKeyValue(jvp, vlp, MZP(prp->Key)); + } // endfor prp + + } // endif Type + + } // endif n + + Jb = true; + return jvp; +} // end of MakeJson + +/*********************************************************************************/ +/* SetValue: Set a value from a BVALUE contains. */ +/*********************************************************************************/ +void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) +{ + if (vlp) { + vp->SetNull(false); + + if (Jb) { + vp->SetValue_psz(Serialize(g, vlp, NULL, 0)); + Jb = false; + } else switch (vlp->Type) { + case TYPE_DTM: + case TYPE_STRG: + vp->SetValue_psz(GetString(vlp)); + break; + case TYPE_INTG: + vp->SetValue(GetInteger(vlp)); + break; + case TYPE_BINT: + vp->SetValue(GetBigint(vlp)); + break; + case TYPE_DBL: + case TYPE_FLOAT: + if (vp->IsTypeNum()) + vp->SetValue(GetDouble(vlp)); + else // Get the proper number of decimals + vp->SetValue_psz(GetString(vlp)); + + break; + case TYPE_BOOL: + if (vp->IsTypeNum()) + vp->SetValue(GetInteger(vlp) ? 1 : 0); + else + vp->SetValue_psz(GetString(vlp)); + + break; + case TYPE_JAR: + vp->SetValue_psz(GetArrayText(g, vlp, NULL)); + break; + case TYPE_JOB: + vp->SetValue_psz(GetObjectText(g, vlp, NULL)); + break; + case TYPE_NULL: + vp->SetNull(true); + default: + vp->Reset(); + } // endswitch Type + + } else { + vp->SetNull(true); + vp->Reset(); + } // endif val + +} // end of SetJsonValue + +/*********************************************************************************/ +/* GetJson: */ +/*********************************************************************************/ +PBVAL BJNX::GetJson(PGLOBAL g) +{ + return GetRowValue(g, Row, 0); +} // end of GetJson + +/*********************************************************************************/ +/* ReadValue: */ +/*********************************************************************************/ +void BJNX::ReadValue(PGLOBAL g) +{ + Value->SetValue_pval(GetColumnValue(g, Row, 0)); +} // end of ReadValue + +/*********************************************************************************/ +/* GetColumnValue: */ +/*********************************************************************************/ +PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i) +{ + PBVAL vlp = GetRowValue(g, row, i); + + SetJsonValue(g, Value, vlp); + return Value; +} // end of GetColumnValue + +/*********************************************************************************/ +/* GetRowValue: */ +/*********************************************************************************/ +PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i) +{ + my_bool expd = false; + PBVAL bap; + PBVAL vlp = NULL; + + for (; i < Nod && row; i++) { + if (Nodes[i].Op == OP_NUM) { + Value->SetValue(row->Type == TYPE_JAR ? 
GetArraySize(row) : 1); + vlp = NewVal(Value); + return vlp; + } else if (Nodes[i].Op == OP_XX) { + return MakeJson(g, row, i); + } else if (Nodes[i].Op == OP_EXP) { + PUSH_WARNING("Expand not supported by this function"); + return NULL; + } else switch (row->Type) { + case TYPE_JOB: + if (!Nodes[i].Key) { + // Expected Array was not there + if (Nodes[i].Op == OP_LE) { + if (i < Nod - 1) + continue; + else + vlp = row; // DupVal(g, row) ??? + + } else { + strcpy(g->Message, "Unexpected object"); + vlp = NULL; + } //endif Op + + } else + vlp = GetKeyValue(row, Nodes[i].Key); + + break; + case TYPE_JAR: + bap = row; + + if (!Nodes[i].Key) { + if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) + vlp = GetArrayValue(bap, Nodes[i].Rank); + else if (Nodes[i].Op == OP_EXP) + return (PBVAL)ExpandArray(g, bap, i); + else + return NewVal(CalculateArray(g, bap, i)); + + } else { + // Unexpected array, unwrap it as [0] + vlp = GetArrayValue(bap, 0); + i--; + } // endif's + + break; + case TYPE_JVAL: + vlp = row; + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + vlp = NULL; + } // endswitch Type + + row = vlp; + } // endfor i + + return vlp; +} // end of GetRowValue + +/*********************************************************************************/ +/* ExpandArray: */ +/*********************************************************************************/ +PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n) +{ + strcpy(g->Message, "Expand cannot be done by this function"); + return NULL; +} // end of ExpandArray + +/*********************************************************************************/ +/* Get the value used for calculating the array. */ +/*********************************************************************************/ +PVAL BJNX::GetCalcValue(PGLOBAL g, PBVAL bap, int n) +{ + // For calculated arrays, a local Value must be used + int lng = 0; + short type, prec = 0; + bool b = n < Nod - 1; + PVAL valp; + PBVAL vlp, vp; + OPVAL op = Nodes[n].Op; + + switch (op) { + case OP_NUM: + type = TYPE_INT; + break; + case OP_ADD: + case OP_MULT: + if (!IsTypeNum(Buf_Type)) { + type = TYPE_INT; + prec = 0; + + for (vlp = GetArray(bap); vlp; vlp = GetNext(vlp)) { + vp = (b && IsJson(vlp)) ? GetRowValue(g, vlp, n + 1) : vlp; + + switch (vp->Type) { + case TYPE_BINT: + if (type == TYPE_INT) + type = TYPE_BIGINT; + + break; + case TYPE_DBL: + case TYPE_FLOAT: + type = TYPE_DOUBLE; + prec = MY_MAX(prec, vp->Nd); + break; + default: + break; + } // endswitch Type + + } // endfor vlp + + } else { + type = Buf_Type; + prec = GetPrecision(); + } // endif Buf_Type + + break; + case OP_SEP: + if (IsTypeChar(Buf_Type)) { + type = TYPE_DOUBLE; + prec = 2; + } else { + type = Buf_Type; + prec = GetPrecision(); + } // endif Buf_Type + + break; + case OP_MIN: + case OP_MAX: + type = Buf_Type; + lng = Long; + prec = GetPrecision(); + break; + case OP_CNC: + type = TYPE_STRING; + + if (IsTypeChar(Buf_Type)) { + lng = (Long) ? 
Long : 512;
+      prec = GetPrecision();
+    } else
+      lng = 512;
+
+    break;
+  default:
+    break;
+  } // endswitch Op
+
+  return valp = AllocateValue(g, type, lng, prec);
+} // end of GetCalcValue
+
+/*********************************************************************************/
+/* CalculateArray */
+/*********************************************************************************/
+PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n)
+{
+  int    i, ars = GetArraySize(bap), nv = 0;
+  bool   err;
+  OPVAL  op = Nodes[n].Op;
+  PVAL   val[2], vp = GetCalcValue(g, bap, n);
+  PVAL   mulval = AllocateValue(g, vp);
+  PBVAL  bvrp, bvp;
+  BVAL   bval;
+
+  vp->Reset();
+  xtrc(1, "CalculateArray size=%d op=%d\n", ars, op);
+
+  try {
+    for (i = 0; i < ars; i++) {
+      bvrp = GetArrayValue(bap, i);
+      xtrc(1, "i=%d nv=%d\n", i, nv);
+
+      if (!IsValueNull(bvrp) || (op == OP_CNC && GetJsonNull())) {
+        if (IsValueNull(bvrp)) {
+          SetString(bvrp, NewStr(GetJsonNull()), 0);
+          bvp = bvrp;
+        } else if (n < Nod - 1 && IsJson(bvrp)) {
+          SetValue(&bval, GetColumnValue(g, bvrp, n + 1));
+          bvp = &bval;
+        } else
+          bvp = bvrp;
+
+        if (trace(1))
+          htrc("bvp=%s null=%d\n", GetString(bvp), IsValueNull(bvp) ? 1 : 0);
+
+        if (!nv++) {
+          SetJsonValue(g, vp, bvp);
+          continue;
+        } else
+          SetJsonValue(g, mulval, bvp);
+
+        if (!mulval->IsNull()) {
+          switch (op) {
+            case OP_CNC:
+              if (Nodes[n].CncVal) {
+                val[0] = Nodes[n].CncVal;
+                err = vp->Compute(g, val, 1, op);
+              } // endif CncVal
+
+              val[0] = mulval;
+              err = vp->Compute(g, val, 1, op);
+              break;
+//          case OP_NUM:
+            case OP_SEP:
+              val[0] = vp;
+              val[1] = mulval;
+              err = vp->Compute(g, val, 2, OP_ADD);
+              break;
+            default:
+              val[0] = vp;
+              val[1] = mulval;
+              err = vp->Compute(g, val, 2, op);
+          } // endswitch Op
+
+          if (err)
+            vp->Reset();
+
+          if (trace(1)) {
+            char buf[32];
+
+            htrc("vp='%s' err=%d\n", vp->GetCharString(buf), err ? 1 : 0);
+          } // endif trace
+
+        } // endif Zero
+
+      } // endif jvrp
+
+    } // endfor i
+
+    if (op == OP_SEP) {
+      // Calculate average
+      mulval->SetValue(nv);
+      val[0] = vp;
+      val[1] = mulval;
+
+      if (vp->Compute(g, val, 2, OP_DIV))
+        vp->Reset();
+
+    } // endif Op
+
+  } catch (int n) {
+    xtrc(1, "Exception %d: %s\n", n, g->Message);
+    PUSH_WARNING(g->Message);
+  } catch (const char* msg) {
+    strcpy(g->Message, msg);
+  } // end catch
+
+  return vp;
+} // end of CalculateArray
+
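CalculateArray above folds the array through Compute(): for OP_SEP it sums the non-null values and finally divides the sum by their count. A standalone mimic of that average semantics on plain doubles; the names are illustrative only.

```cpp
#include <cstddef>
#include <iostream>

// Mimic of the OP_SEP ('!') path above: null entries are skipped, the rest
// are summed, and the sum is divided by the number of values kept (nv).
static double average_like_op_sep(const double* v, const bool* null, size_t n)
{
  double sum = 0.0;
  size_t nv = 0;

  for (size_t i = 0; i < n; i++)
    if (!null[i]) {
      sum += v[i];
      nv++;
    }

  return nv ? sum / nv : 0.0;
}

int main()
{
  double v[] = {2.0, 4.0, 0.0, 6.0};
  bool   nul[] = {false, false, true, false};

  std::cout << average_like_op_sep(v, nul, 4) << '\n';  // 4: the null is skipped
}
```

+/***********************************************************************/
+/* GetRow: Set the complete path of the object to be set.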
*/ +/***********************************************************************/ +PBVAL BJNX::GetRow(PGLOBAL g) +{ + PBVAL val = NULL; + PBVAL arp; + PBVAL nwr, row = Row; + + for (int i = 0; i < Nod - 1 && row; i++) { + if (Nodes[i].Op == OP_XX) + break; + else if (Nodes[i].Op == OP_EXP) { + PUSH_WARNING("Expand not supported by this function"); + return NULL; + } else switch (row->Type) { + case TYPE_JOB: + if (!Nodes[i].Key) + // Expected Array was not there, wrap the value + continue; + + val = GetKeyValue(row, Nodes[i].Key); + break; + case TYPE_JAR: + arp = row; + + if (!Nodes[i].Key) { + if (Nodes[i].Op == OP_EQ) + val = GetArrayValue(arp, Nodes[i].Rank); + else + val = GetArrayValue(arp, Nodes[i].Rx); + + } else { + // Unexpected array, unwrap it as [0] + val = GetArrayValue(arp, 0); + i--; + } // endif Nodes + + break; + case TYPE_JVAL: + val = MVP(row->To_Val); + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + val = NULL; + } // endswitch Type + + if (val) { + row = val; + } else { + // Construct missing objects + for (i++; row && i < Nod; i++) { + if (Nodes[i].Op == OP_XX) + break; + + // Construct new row + nwr = NewVal(); + + if (row->Type == TYPE_JOB) { + SetKeyValue(row, MOF(nwr), Nodes[i - 1].Key); + } else if (row->Type == TYPE_JAR) { + AddArrayValue(row, MOF(nwr)); + } else { + strcpy(g->Message, "Wrong type when writing new row"); + nwr = NULL; + } // endif's + + row = nwr; + } // endfor i + + break; + } // endelse + + } // endfor i + + return row; +} // end of GetRow + +/***********************************************************************/ +/* WriteValue: */ +/***********************************************************************/ +my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) +{ + PBVAL objp = NULL; + PBVAL arp = NULL; + PBVAL jvp = NULL; + PBVAL row = GetRow(g); + + if (!row) + return true; + + switch (row->Type) { + case TYPE_JOB: objp = row; break; + case TYPE_JAR: arp = row; break; + case TYPE_JVAL: jvp = MVP(row->To_Val); break; + default: + strcpy(g->Message, "Invalid target type"); + return true; + } // endswitch Type + + if (arp) { + if (!Nodes[Nod - 1].Key) { + if (Nodes[Nod - 1].Op == OP_EQ) + SetArrayValue(arp, jvalp, Nodes[Nod - 1].Rank); + else + AddArrayValue(arp, MOF(jvalp)); + + } // endif Key + + } else if (objp) { + if (Nodes[Nod - 1].Key) + SetKeyValue(objp, MOF(jvalp), Nodes[Nod - 1].Key); + + } else if (jvp) + SetValueVal(jvp, jvalp); + + return false; +} // end of WriteValue + +/*********************************************************************************/ +/* GetRowValue: */ +/*********************************************************************************/ +my_bool BJNX::DeleteItem(PGLOBAL g, PBVAL row) +{ + int n = -1; + my_bool b = false; + bool loop; + PBVAL vlp, pvp, rwp; + + do { + loop = false; + vlp = NULL; + pvp = rwp = row; + + for (int i = 0; i < Nod && rwp; i++) { + if (Nodes[i].Op == OP_XX) + break; + else switch (rwp->Type) { + case TYPE_JOB: + if (!Nodes[i].Key) { + vlp = NULL; + } else + vlp = GetKeyValue(rwp, Nodes[i].Key); + + break; + case TYPE_JAR: + if (!Nodes[i].Key) { + if (Nodes[i].Op == OP_EXP) { + if (loop) { + PUSH_WARNING("Only one expand can be handled"); + return b; + } // endif loop + + n++; + } else + n = Nodes[i].Rank; + + vlp = GetArrayValue(rwp, n); + + if (GetNext(vlp) && Nodes[i].Op == OP_EXP) + loop = true; + + } else + vlp = NULL; + + break; + case TYPE_JVAL: + vlp = rwp; + break; + default: + vlp = NULL; + } // endswitch Type + + pvp = rwp; + rwp = vlp; + vlp = 
NULL; + } // endfor i + + if (rwp) { + if (Nodes[Nod - 1].Op == OP_XX) { + if (!IsJson(rwp)) + rwp->Type = TYPE_NULL; + + rwp->To_Val = 0; + } else switch (pvp->Type) { + case TYPE_JOB: + b = DeleteKey(pvp, Nodes[Nod - 1].Key); + break; + case TYPE_JAR: + if (Nodes[Nod - 1].Op == OP_EXP) { + pvp->To_Val = 0; + loop = false; + } else + b = DeleteValue(pvp, n); + + break; + default: + break; + } // endswitch Type + + } // endif rwp + + } while (loop); + + return b; +} // end of DeleteItem + +/*********************************************************************************/ +/* CheckPath: Checks whether the path exists in the document. */ +/*********************************************************************************/ +my_bool BJNX::CheckPath(PGLOBAL g) +{ + PBVAL val = NULL; + PBVAL row = Row; + + for (int i = 0; i < Nod && row; i++) { + val = NULL; + + if (Nodes[i].Op == OP_NUM || Nodes[i].Op == OP_XX) { + } else switch (row->Type) { + case TYPE_JOB: + if (Nodes[i].Key) + val = GetKeyValue(row, Nodes[i].Key); + + break; + case TYPE_JAR: + if (!Nodes[i].Key) + if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) + val = GetArrayValue(row, Nodes[i].Rank); + + break; + case TYPE_JVAL: + val = row; + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + } // endswitch Type + + if (i < Nod-1) + if (!(row = (IsJson(val)) ? val : NULL)) + val = NULL; + + } // endfor i + + return (val != NULL); +} // end of CheckPath + +/*********************************************************************************/ +/* Check if a path was specified and set jvp according to it. */ +/*********************************************************************************/ +my_bool BJNX::CheckPath(PGLOBAL g, UDF_ARGS *args, PBVAL jsp, PBVAL& jvp, int n) +{ + for (uint i = n; i < args->arg_count; i++) + if (args->arg_type[i] == STRING_RESULT && args->args[i]) { + // A path to a subset of the json tree is given + char *path = MakePSZ(g, args, i); + + if (path) { + Row = jsp; + + if (SetJpath(g, path)) + return true; + + if (!(jvp = GetJson(g))) { + sprintf(g->Message, "No sub-item at '%s'", path); + return true; + } else + return false; + + } else { + strcpy(g->Message, "Path argument is null"); + return true; + } // endif path + + } // endif type + + jvp = jsp; + return false; +} // end of CheckPath + +/*********************************************************************************/ +/* Locate a value in a JSON tree: */ +/*********************************************************************************/ +PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k) +{ + PSZ str = NULL; + my_bool b = false, err = true; + + g->Message[0] = 0; + + if (!jsp) { + strcpy(g->Message, "Null json tree"); + return NULL; + } // endif jsp + + try { + // Write to the path string + Jp = new(g) JOUTSTR(g); + Jp->WriteChr('$'); + Bvalp = jvp; + K = k; + + switch (jsp->Type) { + case TYPE_JAR: + err = LocateArray(g, jsp); + break; + case TYPE_JOB: + err = LocateObject(g, jsp); + break; + case TYPE_JVAL: + err = LocateValue(g, MVP(jsp->To_Val)); + break; + default: + err = true; + } // endswitch Type + + if (err) { + if (!g->Message[0]) + strcpy(g->Message, "Invalid json tree"); + + } else if (Found) { + Jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, Jp->N); + str = Jp->Strp; + } // endif's + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + } catch (const char* msg) { + strcpy(g->Message, msg); + } // end catch + + return str; +} // end of Locate + 
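Locate above walks the tree and, at the K-th match, materializes the path it took as a Jpath string: '$', then ".key" per object step and "[i]" (shifted by the index base B) per array step. A standalone sketch of that output format; make_path and Step are illustrative, not the patch's code.

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Each step is either an object key (key != nullptr) or an array index,
// mirroring how LocateObject/LocateArray extend the path buffer Jp.
struct Step { const char* key; int index; };

static std::string make_path(const std::vector<Step>& steps, int B = 0)
{
  std::string p = "$";
  char buf[16];

  for (const Step& s : steps)
    if (s.key) {
      p += '.';
      p += s.key;
    } else {
      std::snprintf(buf, sizeof buf, "[%d]", s.index + B);
      p += buf;
    }

  return p;
}

int main()
{
  // The match found under {"store":{"book":[{"title":...}]}}:
  std::printf("%s\n", make_path({{"store", 0}, {"book", 0},
                                 {nullptr, 0}, {"title", 0}}).c_str());
  // prints: $.store.book[0].title
}
```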
+/*********************************************************************************/ +/* Locate in a JSON Array. */ +/*********************************************************************************/ +my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp) +{ + char s[16]; + int n = GetArraySize(jarp); + size_t m = Jp->N; + + for (int i = 0; i < n && !Found; i++) { + Jp->N = m; + sprintf(s, "[%d]", i + B); + + if (Jp->WriteStr(s)) + return true; + + if (LocateValue(g, GetArrayValue(jarp, i))) + return true; + + } // endfor i + + return false; +} // end of LocateArray + +/*********************************************************************************/ +/* Locate in a JSON Object. */ +/*********************************************************************************/ +my_bool BJNX::LocateObject(PGLOBAL g, PBVAL jobp) +{ + size_t m; + + if (Jp->WriteChr('.')) + return true; + + m = Jp->N; + + for (PBPR pair = GetObject(jobp); pair && !Found; pair = GetNext(pair)) { + Jp->N = m; + + if (Jp->WriteStr(MZP(pair->Key))) + return true; + + if (LocateValue(g, GetVlp(pair))) + return true; + + } // endfor i + + return false; +} // end of LocateObject + +/*********************************************************************************/ +/* Locate a JSON Value. */ +/*********************************************************************************/ +my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp) +{ + if (CompareTree(g, Bvalp, jvp)) + Found = (--K == 0); + else if (jvp->Type == TYPE_JAR) + return LocateArray(g, jvp); + else if (jvp->Type == TYPE_JOB) + return LocateObject(g, jvp); + + return false; +} // end of LocateValue + +/*********************************************************************************/ +/* Locate all occurrences of a value in a JSON tree: */ +/*********************************************************************************/ +PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx) +{ + PSZ str = NULL; + my_bool b = false, err = true; + PJPN jnp; + + if (!jsp) { + strcpy(g->Message, "Null json tree"); + return NULL; + } // endif jsp + + try { + jnp = (PJPN)PlugSubAlloc(g, NULL, sizeof(JPN) * mx); + memset(jnp, 0, sizeof(JPN) * mx); + g->Message[0] = 0; + + // Write to the path string + Jp = new(g)JOUTSTR(g); + Bvalp = bvp; + Imax = mx - 1; + Jpnp = jnp; + Jp->WriteChr('['); + + switch (jsp->Type) { + case TYPE_JAR: + err = LocateArrayAll(g, jsp); + break; + case TYPE_JOB: + err = LocateObjectAll(g, jsp); + break; + case TYPE_JVAL: + err = LocateValueAll(g, MVP(jsp->To_Val)); + break; + default: + err = LocateValueAll(g, jsp); + } // endswitch Type + + if (!err) { + if (Jp->N > 1) + Jp->N--; + + Jp->WriteChr(']'); + Jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, Jp->N); + str = Jp->Strp; + } else if (!g->Message[0]) + strcpy(g->Message, "Invalid json tree"); + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + } catch (const char* msg) { + strcpy(g->Message, msg); + } // end catch + + return str; +} // end of LocateAll + +/*********************************************************************************/ +/* Locate in a JSON Array. 
*/ +/*********************************************************************************/ +my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp) +{ + int i = 0; + + if (I < Imax) { + Jpnp[++I].Type = TYPE_JAR; + + for (PBVAL vp = GetArray(jarp); vp; vp = GetNext(vp)) { + Jpnp[I].N = i; + + if (LocateValueAll(g, GetArrayValue(jarp, i))) + return true; + + i++; + } // endfor i + + I--; + } // endif I + + return false; +} // end of LocateArrayAll + +/*********************************************************************************/ +/* Locate in a JSON Object. */ +/*********************************************************************************/ +my_bool BJNX::LocateObjectAll(PGLOBAL g, PBVAL jobp) +{ + if (I < Imax) { + Jpnp[++I].Type = TYPE_JOB; + + for (PBPR pair = GetObject(jobp); pair; pair = GetNext(pair)) { + Jpnp[I].Key = MZP(pair->Key); + + if (LocateValueAll(g, GetVlp(pair))) + return true; + + } // endfor i + + I--; + } // endif I + + return false; +} // end of LocateObjectAll + +/*********************************************************************************/ +/* Locate a JSON Value. */ +/*********************************************************************************/ +my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp) +{ + if (CompareTree(g, Bvalp, jvp)) + return AddPath(); + else if (jvp->Type == TYPE_JAR) + return LocateArrayAll(g, jvp); + else if (jvp->Type == TYPE_JOB) + return LocateObjectAll(g, jvp); + + return false; +} // end of LocateValueAll + +/*********************************************************************************/ +/* Compare two JSON trees. */ +/*********************************************************************************/ +my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2) +{ + if (!jp1 || !jp2 || jp1->Type != jp2->Type || GetSize(jp1) != GetSize(jp2)) + return false; + + my_bool found = true; + + if (jp1->Type == TYPE_JAR) { + for (int i = 0; found && i < GetArraySize(jp1); i++) + found = (CompareValues(g, GetArrayValue(jp1, i), GetArrayValue(jp2, i))); + + } else if (jp1->Type == TYPE_JOB) { + PBPR p1 = GetObject(jp1), p2 = GetObject(jp2); + + // Keys can be differently ordered + for (; found && p1 && p2; p1 = GetNext(p1)) + found = CompareValues(g, GetVlp(p1), GetKeyValue(jp2, GetKey(p1))); + + } else if (jp1->Type == TYPE_JVAL) { + found = CompareTree(g, MVP(jp1->To_Val), (MVP(jp2->To_Val))); + } else + found = CompareValues(g, jp1, jp2); + + return found; +} // end of CompareTree + +/*********************************************************************************/ +/* Compare two VAL values and return true if they are equal. 
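+*/
+/*********************************************************************************/

A point worth noting in CompareValues below: numeric equality only widens inside a family. TYPE_INTG and TYPE_BINT compare as integers, TYPE_FLOAT and TYPE_DBL as doubles, but an integer never equals a float (and strings compare case-insensitively when either side carries the ci flag in Nd). A compact mimic of the numeric matrix, with illustrative names:

```cpp
#include <iostream>

enum Kind { INTG, BINT, FLOAT, DBL };

struct V { Kind k; long long i; double d; };

// Mimic of the CompareValues switch: integer kinds pair with integer
// kinds, floating kinds with floating kinds, and the families never mix.
static bool equal_like_compare_values(const V& a, const V& b)
{
  bool ai = (a.k == INTG || a.k == BINT);
  bool bi = (b.k == INTG || b.k == BINT);

  if (ai && bi)   return a.i == b.i;
  if (!ai && !bi) return a.d == b.d;
  return false;               // e.g. INTG 1 is not equal to FLOAT 1.0
}

int main()
{
  V i1{INTG, 1, 0.0}, f1{FLOAT, 0, 1.0}, b1{BINT, 1, 0.0};

  std::cout << equal_like_compare_values(i1, b1) << ' '   // 1: same family
            << equal_like_compare_values(i1, f1) << '\n'; // 0: families differ
}
```

+/*********************************************************************************/
+/* Compare two VAL values and return true if they are equal.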
*/ +/*********************************************************************************/ +my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) +{ + my_bool b = false; + + if (v1 && v2) + switch (v1->Type) { + case TYPE_JAR: + case TYPE_JOB: + if (v2->Type == v1->Type) + b = CompareTree(g, v1, v2); + + break; + case TYPE_STRG: + if (v2->Type == TYPE_STRG) { + if (v1->Nd || v2->Nd) // Case insensitive + b = (!stricmp(MZP(v1->To_Val), MZP(v2->To_Val))); + else + b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val))); + + } // endif Type + + break; + case TYPE_DTM: + if (v2->Type == TYPE_DTM) + b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val))); + + break; + case TYPE_INTG: + if (v2->Type == TYPE_INTG) + b = (v1->N == v2->N); + else if (v2->Type == TYPE_BINT) + b = ((longlong)v1->N == LLN(v2->To_Val)); + + break; + case TYPE_BINT: + if (v2->Type == TYPE_INTG) + b = (LLN(v1->To_Val) == (longlong)v2->N); + else if (v2->Type == TYPE_BINT) + b = (LLN(v1->To_Val) == LLN(v2->To_Val)); + + break; + case TYPE_FLOAT: + if (v2->Type == TYPE_FLOAT) + b = (v1->F == v2->F); + else if (v2->Type == TYPE_DBL) + b = ((double)v1->F == DBL(v2->To_Val)); + + break; + case TYPE_DBL: + if (v2->Type == TYPE_DBL) + b = (DBL(v1->To_Val) == DBL(v2->To_Val)); + else if (v2->Type == TYPE_FLOAT) + b = (DBL(v1->To_Val) == (double)v2->F); + + break; + case TYPE_BOOL: + if (v2->Type == TYPE_BOOL) + b = (v1->B == v2->B); + + break; + case TYPE_NULL: + b = (v2->Type == TYPE_NULL); + break; + default: + break; + } // endswitch Type + + else + b = (!v1 && !v2); + + return b; +} // end of CompareValues + +/*********************************************************************************/ +/* Add the found path to the list. */ +/*********************************************************************************/ +my_bool BJNX::AddPath(void) +{ + char s[16]; + + if (Jp->WriteStr("\"$")) + return true; + + for (int i = 0; i <= I; i++) { + if (Jpnp[i].Type == TYPE_JAR) { + sprintf(s, "[%d]", Jpnp[i].N + B); + + if (Jp->WriteStr(s)) + return true; + + } else { + if (Jp->WriteChr('.')) + return true; + + if (Jp->WriteStr(Jpnp[i].Key)) + return true; + + } // endif's + + } // endfor i + + if (Jp->WriteStr("\",")) + return true; + + return false; +} // end of AddPath + +/*********************************************************************************/ +/* Make a JSON value from the passed argument. */ +/*********************************************************************************/ +PBVAL BJNX::MakeValue(UDF_ARGS *args, uint i, bool b, PBVAL *top) +{ + char *sap = (args->arg_count > i) ? 
args->args[i] : NULL; + int n, len; + int ci; + long long bigint; + PGLOBAL& g = G; + PBVAL jvp = NewVal(); + + if (top) + *top = NULL; + + if (sap) switch (args->arg_type[i]) { + case STRING_RESULT: + if ((len = args->lengths[i])) { + if ((n = IsArgJson(args, i)) < 3) + sap = MakePSZ(g, args, i); + + if (n) { + if (n == 3) { + PBSON bsp = (PBSON)sap; + + if (i == 0) { + if (top) + *top = (PBVAL)bsp->Top; + + jvp = (PBVAL)bsp->Jsp; + G = bsp->G; + Base = G->Sarea; + } else { + BJNX bnx(bsp->G); + + jvp = MoveJson(&bnx, (PBVAL)bsp->Jsp); + } // endelse i + + } else { + if (n == 2) { + if (!(sap = GetJsonFile(g, sap))) { + PUSH_WARNING(g->Message); + return jvp; + } // endif sap + + len = strlen(sap); + } // endif n + + if (!(jvp = ParseJson(g, sap, strlen(sap)))) + PUSH_WARNING(g->Message); + else if (top) + *top = jvp; + + } // endif's n + + } else { + PBVAL bp = NULL; + + if (b) { + if (strchr("[{ \t\r\n", *sap)) { + // Check whether this string is a valid json string + JsonMemSave(g); + + if (!(bp = ParseJson(g, sap, strlen(sap)))) + JsonSubSet(g); // Recover suballocated memory + + g->Saved_Size = 0; + } else { + // Perhaps a file name + char* s = GetJsonFile(g, sap); + + if (s) + bp = ParseJson(g, s, strlen(s)); + + } // endif's + + } // endif b + + if (!bp) { + ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; + SetString(jvp, sap, ci); + } else { + if (top) + *top = bp; + + jvp = bp; + } // endif bp + + } // endif n + + } // endif len + + break; + case INT_RESULT: + bigint = *(long long*)sap; + + if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || + (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) + SetBool(jvp, (char)bigint); + else + SetBigint(jvp, bigint); + + break; + case REAL_RESULT: + SetFloat(jvp, *(double*)sap); + break; + case DECIMAL_RESULT: + SetFloat(jvp, MakePSZ(g, args, i)); + break; + case TIME_RESULT: + case ROW_RESULT: + default: + break; + } // endswitch arg_type + + return jvp; +} // end of MakeValue + +/*********************************************************************************/ +/* Try making a JSON value of the passed type from the passed argument. */ +/*********************************************************************************/ +PBVAL BJNX::MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i, JTYP type, PBVAL *top) +{ + char *sap; + PBVAL jsp; + PBVAL jvp = MakeValue(args, i, false, top); + + //if (type == TYPE_JSON) { + // if (jvp->GetValType() >= TYPE_JSON) + // return jvp; + + //} else if (jvp->GetValType() == type) + // return jvp; + + if (jvp->Type == TYPE_STRG) { + sap = GetString(jvp); + + if ((jsp = ParseJson(g, sap, strlen(sap)))) { + if ((type == TYPE_JSON && jsp->Type != TYPE_JVAL) || jsp->Type == type) { + if (top) + *top = jvp; + + SetValueVal(jvp, jsp); + } // endif Type + + } // endif jsp + + } // endif Type + + return jvp; +} // end of MakeTypedValue + +/*********************************************************************************/ +/* Parse a json file. 
*/ +/*********************************************************************************/ +PBVAL BJNX::ParseJsonFile(PGLOBAL g, char *fn, int& pty, size_t& len) +{ + char *memory; + HANDLE hFile; + MEMMAP mm; + PBVAL jsp; + + // Create the mapping file object + hFile = CreateFileMap(g, fn, &mm, MODE_READ, false); + + if (hFile == INVALID_HANDLE_VALUE) { + DWORD rc = GetLastError(); + + if (!(*g->Message)) + sprintf(g->Message, MSG(OPEN_MODE_ERROR), "map", (int)rc, fn); + + return NULL; + } // endif hFile + + // Get the file size + len = (size_t)mm.lenL; + + if (mm.lenH) + len += mm.lenH; + + memory = (char *)mm.memory; + + if (!len) { // Empty or deleted file + CloseFileHandle(hFile); + return NULL; + } // endif len + + if (!memory) { + CloseFileHandle(hFile); + sprintf(g->Message, MSG(MAP_VIEW_ERROR), fn, GetLastError()); + return NULL; + } // endif Memory + + CloseFileHandle(hFile); // Not used anymore + + // Parse the json file and allocate its tree structure + g->Message[0] = 0; + jsp = ParseJson(g, memory, len); + pty = pretty; + CloseMemMap(memory, len); + return jsp; +} // end of ParseJsonFile + +/*********************************************************************************/ +/* Make the result according to the first argument type. */ +/*********************************************************************************/ +char *BJNX::MakeResult(UDF_ARGS *args, PBVAL top, uint n) +{ + char *str = NULL; + PGLOBAL& g = G; + + if (IsArgJson(args, 0) == 2) { + // Make the change in the json file + PSZ fn = MakePSZ(g, args, 0); + + if (Changed) { + int pretty = 2; + + for (uint i = n; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT) { + pretty = (int)*(longlong*)args->args[i]; + break; + } // endif type + + if (!Serialize(g, top, fn, pretty)) + PUSH_WARNING(g->Message); + + Changed = false; + } // endif Changed + + str = fn; + } else if (IsArgJson(args, 0) == 3) { + PBSON bsp = (PBSON)args->args[0]; + + if (bsp->Filename) { + if (Changed) { + // Make the change in the json file + if (!Serialize(g, (PBVAL)top, bsp->Filename, bsp->Pretty)) + PUSH_WARNING(g->Message); + + Changed = false; + } // endif Changed + + str = bsp->Filename; + } else if (!(str = Serialize(g, (PBVAL)top, NULL, 0))) + PUSH_WARNING(g->Message); + + } else if (!(str = Serialize(g, top, NULL, 0))) + PUSH_WARNING(g->Message); + + return str; +} // end of MakeResult + +/*********************************************************************************/ +/* Make the binary result according to the first argument type. */ +/*********************************************************************************/ +PBSON BJNX::MakeBinResult(UDF_ARGS *args, PBVAL top, ulong len, int n) +{ + char* filename = NULL; + int pretty = 2; + PBSON bnp = NULL; + + if (IsArgJson(args, 0) == 3) { + bnp = (PBSON)args->args[0]; + + if (bnp->Top != (PJSON)top) + bnp->Top = bnp->Jsp = (PJSON)top; + + return bnp; + } // endif 3 + + if (IsArgJson(args, 0) == 2) { + for (uint i = n; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT) { + pretty = (int)*(longlong*)args->args[i]; + break; + } // endif type + + filename = (char*)args->args[0]; + } // endif 2 + + if ((bnp = BbinAlloc(G, len, top))) { + bnp->Filename = filename; + bnp->Pretty = pretty; + strcpy(bnp->Msg, "Json Binary item"); + } //endif bnp + + return bnp; +} // end of MakeBinResult + +/***********************************************************************/ +/* Move a Json val block from one area to the current area. 
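+*/
+/***********************************************************************/

The Move* family below exists because a binary argument (Bbin_) may carry a tree sub-allocated in another statement's work area: its nodes must be deep-copied into the current one, re-linking everything through new offsets. A standalone mimic of that relocation for a simple offset-linked list; Node, put and move_list are illustrative names.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Offset-linked node, like BVAL: `next` is an offset within its own arena
// and offset 0 means "none", so both arenas reserve their first bytes.
struct Node { uint32_t next; int val; };

static uint32_t put(std::vector<char>& a, Node n)
{
  uint32_t o = (uint32_t)a.size();
  a.insert(a.end(), (char*)&n, (char*)&n + sizeof n);
  return o;
}

// Deep-copy a list from `src` into `dst`, the way MoveJson/MoveArray below
// rebuild a whole tree inside the current work area.
static uint32_t move_list(const std::vector<char>& src, uint32_t off,
                          std::vector<char>& dst)
{
  uint32_t head = 0, prev = 0;

  while (off) {
    const Node* n = (const Node*)(src.data() + off);
    uint32_t noff = put(dst, {0, n->val});

    if (prev)
      ((Node*)(dst.data() + prev))->next = noff;
    else
      head = noff;

    prev = noff;
    off = n->next;
  }

  return head;
}

int main()
{
  std::vector<char> src(4, 0), dst(4, 0);   // offset 0 reserved as "null"
  uint32_t n2 = put(src, {0, 2});
  uint32_t n1 = put(src, {n2, 1});

  for (uint32_t o = move_list(src, n1, dst); o;
       o = ((const Node*)(dst.data() + o))->next)
    std::cout << ((const Node*)(dst.data() + o))->val << ' ';  // 1 2
  std::cout << '\n';
}
```

+/***********************************************************************/
+/* Move a Json val block from one area to the current area.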
*/ +/***********************************************************************/ +PBVAL BJNX::MoveVal(PBVAL vlp) +{ + PBVAL nvp = NewVal(vlp->Type); + + nvp->Nd = vlp->Nd; + return nvp; +} // end of MovedVal + +/***********************************************************************/ +/* Move a Json tree from one area to current area. */ +/***********************************************************************/ +PBVAL BJNX::MoveJson(PBJNX bxp, PBVAL jvp) +{ + PBVAL res = NULL; + + if (jvp) + switch (jvp->Type) { + case TYPE_JAR: + res = MoveArray(bxp, jvp); + break; + case TYPE_JOB: + res = MoveObject(bxp, jvp); + break; + default: + res = MoveValue(bxp, jvp); + break; + } // endswitch Type + + return res; +} // end of MoveJson + +/***********************************************************************/ +/* Move an array. */ +/***********************************************************************/ +PBVAL BJNX::MoveArray(PBJNX bxp, PBVAL jap) +{ + PBVAL vlp, vmp, jvp = NULL, jarp = MoveVal(jap); + + for (vlp = bxp->GetArray(jap); vlp; vlp = bxp->GetNext(vlp)) { + vmp = MoveJson(bxp, vlp); + + if (jvp) + jvp->Next = MOF(vmp); + else + jarp->To_Val = MOF(vmp); + + jvp = vmp; + } // endfor vlp + + return jarp; +} // end of MoveArray + +/***********************************************************************/ +/* Replace all object pointers by offsets. */ +/***********************************************************************/ +PBVAL BJNX::MoveObject(PBJNX bxp, PBVAL jop) +{ + PBPR mpp, prp, ppp = NULL; + PBVAL vmp, jobp = MoveVal(jop); + + for (prp = bxp->GetObject(jop); prp; prp = bxp->GetNext(prp)) { + vmp = MoveJson(bxp, GetVlp(prp)); + mpp = NewPair(DupStr(bxp->MZP(prp->Key))); + SetPairValue(mpp, vmp); + + if (ppp) + ppp->Vlp.Next = MOF(mpp); + else + jobp->To_Val = MOF(mpp); + + ppp = mpp; + } // endfor vlp + + return jobp; +} // end of MoffObject + +/***********************************************************************/ +/* Move a non json value. */ +/***********************************************************************/ +PBVAL BJNX::MoveValue(PBJNX bxp, PBVAL jvp) +{ + double *dp; + PBVAL nvp = MoveVal(jvp); + + switch (jvp->Type) { + case TYPE_STRG: + case TYPE_DTM: + nvp->To_Val = DupStr(bxp->MZP(jvp->To_Val)); + break; + case TYPE_DBL: + dp = (double*)BsonSubAlloc(sizeof(double)); + *dp = bxp->DBL(jvp->To_Val); + nvp->To_Val = MOF(dp); + break; + case TYPE_JVAL: + nvp->To_Val = MOF(MoveJson(bxp, bxp->MVP(jvp->To_Val))); + break; + default: + nvp->To_Val = jvp->To_Val; + break; + } // endswith Type + + return nvp; +} // end of MoveValue + +/* -----------------------------Utility functions ------------------------------ */ + +/*********************************************************************************/ +/* Returns a pointer to the first integer argument found from the nth argument. */ +/*********************************************************************************/ +static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n) +{ + int *x = NULL; + + for (uint i = n; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT) { + if (args->args[i]) { + if ((x = (int*)PlgDBSubAlloc(g, NULL, sizeof(int)))) + *x = (int)*(longlong*)args->args[i]; + else + PUSH_WARNING(g->Message); + + } // endif args + + n = i + 1; + break; + } // endif arg_type + + return x; +} // end of GetIntArgPtr + +/*********************************************************************************/ +/* Returns not 0 if the argument is a JSON item or file name. 
*/ +/*********************************************************************************/ +int IsArgJson(UDF_ARGS *args, uint i) +{ + int n = 0; + + if (i >= args->arg_count || args->arg_type[i] != STRING_RESULT) { + } else if (!strnicmp(args->attributes[i], "Bson_", 5) || + !strnicmp(args->attributes[i], "Json_", 5)) { + if (!args->args[i] || strchr("[{ \t\r\n", *args->args[i])) + n = 1; // arg should be is a json item +// else +// n = 2; // A file name may have been returned + + } else if (!strnicmp(args->attributes[i], "Bbin_", 5)) { + if (args->lengths[i] == sizeof(BSON)) + n = 3; // arg is a binary json item +// else +// n = 2; // A file name may have been returned + + } else if (!strnicmp(args->attributes[i], "Bfile_", 6) || + !strnicmp(args->attributes[i], "Jfile_", 6)) { + n = 2; // arg is a json file name +#if 0 + } else if (args->lengths[i]) { + PGLOBAL g = PlugInit(NULL, (size_t)args->lengths[i] * M + 1024); + char *sap = MakePSZ(g, args, i); + + if (ParseJson(g, sap, strlen(sap))) + n = 4; + + JsonFreeMem(g); +#endif // 0 + } // endif's + + return n; +} // end of IsArgJson + +/*********************************************************************************/ +/* GetFileLength: returns file size in number of bytes. */ +/*********************************************************************************/ +static long GetFileLength(char *fn) +{ + int h; + long len; + + h= open(fn, _O_RDONLY); + + if (h != -1) { + if ((len = _filelength(h)) < 0) + len = 0; + + close(h); + } else + len = 0; + + return len; +} // end of GetFileLength + +/* ------------------------- Now the new Bin UDF's ----------------------------- */ + +/*********************************************************************************/ +/* Make a Json value containing the parameter. */ +/*********************************************************************************/ +my_bool bsonvalue_init(UDF_INIT* initid, UDF_ARGS* args, char* message) +{ + unsigned long reslen, memlen; + + if (args->arg_count > 1) { + strcpy(message, "Cannot accept more than 1 argument"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bsonvalue_init + +char* bsonvalue(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char*, char*) +{ + char *str; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, 1, false)) { + BJNX bnx(g); + PBVAL bvp = bnx.MakeValue(args, 0, true); + + if (!(str = bnx.Serialize(g, bvp, NULL, 0))) + str = strcpy(result, g->Message); + + } else + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bsonValue + +void bsonvalue_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bsonvalue_deinit + +/*********************************************************************************/ +/* Make a Json array containing all the parameters. */ +/* Note: jvp must be set before arp because it can be a binary argument. 
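+*/
+/*********************************************************************************/

These functions are registered as ordinary MariaDB UDFs. A hedged usage sketch for bsonvalue above and bson_make_array below; the SONAME and the exact textual rendering of the results are assumptions, so the SQL is shown in comments.

```cpp
// Hedged sketch; SQL in comments since these are server UDFs.
//
//   CREATE FUNCTION bsonvalue RETURNS STRING SONAME 'ha_connect.so';
//   CREATE FUNCTION bson_make_array RETURNS STRING SONAME 'ha_connect.so';
//
//   SELECT bsonvalue(1);                  -- -> 1
//   SELECT bsonvalue('foo');              -- -> "foo"
//   SELECT bson_make_array(1, 2, 'three');
//   -- -> [1,2,"three"]
```

+/*********************************************************************************/
+/* Make a Json array containing all the parameters. */
+/* Note: jvp must be set before arp because it can be a binary argument.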
*/ +/*********************************************************************************/ +my_bool bson_make_array_init(UDF_INIT* initid, UDF_ARGS* args, char* message) +{ + unsigned long reslen, memlen; + + CalcLen(args, false, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bson_make_array_init + +char* bson_make_array(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char*, char*) +{ + char* str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false)) { + BJNX bnx(g); + PBVAL jvp = bnx.MakeValue(args, 0); + PBVAL arp = bnx.NewVal(TYPE_JAR); + + for (uint i = 0; i < args->arg_count;) { + bnx.AddArrayValue(arp, jvp); + jvp = bnx.MakeValue(args, ++i); + } // endfor i + + if (!(str = bnx.Serialize(g, arp, NULL, 0))) + str = strcpy(result, g->Message); + + } else + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_make_array + +void bson_make_array_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_make_array_deinit + +/*********************************************************************************/ +/* Add one or several values to a Bson array. */ +/*********************************************************************************/ +my_bool bson_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + //} else if (!IsArgJson(args, 0, true)) { + // strcpy(message, "First argument must be a valid json string or item"); + // return true; + } else + CalcLen(args, false, reslen, memlen); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_array_add_values_init + +char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char*) { + char* str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, true)) { + BJNX bnx(g); + PBVAL arp = bnx.MakeValue(args, 0, true); + + if (arp->Type != TYPE_JAR) { + PUSH_WARNING("First argument is not an array"); + goto fin; + } // endif arp + + for (uint i = 1; i < args->arg_count; i++) + bnx.AddArrayValue(arp, bnx.MakeValue(args, i)); + + bnx.SetChanged(true); + str = bnx.MakeResult(args, arp, INT_MAX); + } // endif CheckMemory + + if (!str) { + PUSH_WARNING(g->Message); + str = args->args[0]; + } // endif str + + // Keep result of constant function + g->Xchk = (g->N) ? str : NULL; + } else + str = (char*)g->Xchk; + + fin: + if (!str) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bson_array_add_values + +void bson_array_add_values_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_array_add_values_deinit + +/*********************************************************************************/ +/* Add one value to a Json array. 
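Every string-returning UDF in this file repeats the same constant-function caching dance: when the optimizer marks the call constant (initid->const_item), the first execution stores its serialized result in g->Xchk (or g->Activityp) and every later row returns it unchanged. A stripped-down model of the pattern, with hypothetical names standing in for the GLOBAL fields:

#include <cstdio>
#include <string>

struct Ctx { bool const_item; bool cached; std::string result; }; // plays g->Xchk / g->N

static std::string eval(Ctx &ctx, int row) {
  if (ctx.cached)                    // like: if (g->Xchk) return (char*)g->Xchk;
    return ctx.result;

  std::string r = "computed@" + std::to_string(row);

  if (ctx.const_item) {              // like: g->Xchk = (initid->const_item) ? str : NULL;
    ctx.result = r;
    ctx.cached = true;
  }
  return r;
}

int main() {
  Ctx c{true, false, ""};
  std::printf("%s\n", eval(c, 1).c_str());  // computed@1
  std::printf("%s\n", eval(c, 2).c_str());  // computed@1 again: served from cache
  return 0;
}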
*/ +/*********************************************************************************/ +my_bool bson_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + //} else if (!IsArgJson(args, 0, true)) { + // strcpy(message, "First argument is not a valid Json item"); + // return true; + } else + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_array_add_init + +char *bson_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 2, false, false, true)) { + int *x; + uint n = 2; + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL jsp, top; + PBVAL arp, jvp = bnx.MakeValue(args, 0, true, &top); + + jsp = jvp; + x = GetIntArgPtr(g, args, n); + + if (bnx.CheckPath(g, args, jsp, jvp, 2)) + PUSH_WARNING(g->Message); + else if (jvp) { + if (jvp->Type != TYPE_JAR) { + if ((arp = bnx.NewVal(TYPE_JAR))) { + bnx.AddArrayValue(arp, jvp); + + if (!top) + top = arp; + + } // endif arp + + } else + arp = jvp; + + if (arp) { + bnx.AddArrayValue(arp, bnx.MakeValue(args, 1), x); + bnx.SetChanged(true); + str = bnx.MakeResult(args, top, n); + } else + PUSH_WARNING(g->Message); + + } else { + PUSH_WARNING("Target is not an array"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jvp + + } // endif CheckMemory + + // In case of error or file, return unchanged argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *res_length = 0; + *is_null = 1; + *error = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bson_array_add + +void bson_array_add_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_array_add_deinit + +/*********************************************************************************/ +/* Delete a value from a Json array. */ +/*********************************************************************************/ +my_bool bson_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_array_delete_init + +char *bson_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 1, false, false, true)) { + int *x; + uint n = 1; + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL arp, top; + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); + + if (!(x = GetIntArgPtr(g, args, n))) + PUSH_WARNING("Missing or null array index"); + else if (bnx.CheckPath(g, args, jvp, arp, 1)) + PUSH_WARNING(g->Message); + else if (arp && arp->Type == TYPE_JAR) { + bnx.DeleteValue(arp, *x); + bnx.SetChanged(true); + str = bnx.MakeResult(args, top, n); + } else { + PUSH_WARNING("First argument target is not an array"); + // if (g->Mrr) *error = 1; + } // endif jvp + + } // endif CheckMemory + + // In case of error or file, return unchanged argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_array_delete + +void bson_array_delete_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_array_delete_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all the parameters. */ +/*********************************************************************************/ +my_bool bson_make_object_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bson_make_object_init + +char *bson_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *, char *) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false, false, true)) { + BJNX bnx(g); + PBVAL objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i)); + + str = bnx.Serialize(g, objp, NULL, 0); + } // endif objp + + } // endif CheckMemory + + if (!str) + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_make_object + +void bson_make_object_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_make_object_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all not null parameters. 
*/ +/*********************************************************************************/ +my_bool bson_object_nonull_init(UDF_INIT *initid, UDF_ARGS *args, + char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bson_object_nonull_init + +char *bson_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *, char *) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { + BJNX bnx(g); + PBVAL jvp, objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + if (!bnx.IsValueNull(jvp = bnx.MakeValue(args, i))) + bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i)); + + str = bnx.Serialize(g, objp, NULL, 0); + } // endif objp + + } // endif CheckMemory + + if (!str) + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_object_nonull + +void bson_object_nonull_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_nonull_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all the key/value parameters. */ +/*********************************************************************************/ +my_bool bson_object_key_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count % 2) { + strcpy(message, "This function must have an even number of arguments"); + return true; + } // endif arg_count + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bson_object_key_init + +char *bson_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *, char *) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { + BJNX bnx(g); + PBVAL objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i += 2) + bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i)); + + str = bnx.Serialize(g, objp, NULL, 0); + } // endif objp + + } // endif CheckMemory + + if (!str) + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_object_key + +void bson_object_key_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_key_deinit + +/*********************************************************************************/ +/* Add or replace a value in a Json Object. 
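For clarity, the argument pairing that bson_object_key performs above (key at even positions, value at the following odd position, stepping by 2) is easy to picture in isolation; a toy version with made-up data:

#include <cstdio>

int main() {
  const char *args[] = {"qty", "7", "price", "27.50", "truck", "false"};
  const unsigned argc = 6;                    // must be even, as the init checks

  for (unsigned i = 0; i + 1 < argc; i += 2)  // like: SetKeyValue(objp, value(i+1), key(i))
    std::printf("\"%s\": %s\n", args[i], args[i + 1]);
  return 0;
}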
*/ +/*********************************************************************************/ +my_bool bson_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else if (!IsArgJson(args, 0)) { + strcpy(message, "First argument must be a json item"); + return true; + } else + CalcLen(args, true, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_object_add_init + +char *bson_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PSZ key; + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 2, false, true, true)) { + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL jvp, objp; + PBVAL jsp, top; + + jsp = bnx.MakeValue(args, 0, true, &top); + + if (bnx.CheckPath(g, args, jsp, jvp, 2)) + PUSH_WARNING(g->Message); + else if (jvp && jvp->Type == TYPE_JOB) { + objp = jvp; + jvp = bnx.MakeValue(args, 1); + key = bnx.MakeKey(args, 1); + bnx.SetKeyValue(objp, jvp, key); + bnx.SetChanged(true); + str = bnx.MakeResult(args, top); + } else { + PUSH_WARNING("First argument target is not an object"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jvp + + } // endif CheckMemory + + // In case of error or file, return unchanged argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_object_add + +void bson_object_add_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_add_deinit + +/*********************************************************************************/ +/* Delete a value from a Json object. */ +/*********************************************************************************/ +my_bool bson_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have 2 or 3 arguments"); + return true; + } else if (!IsArgJson(args, 0)) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument must be a key string"); + return true; + } else + CalcLen(args, true, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_object_delete_init + +char *bson_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 1, false, true, true)) { + bool chg; + BJNX bnx(g, NULL, TYPE_STRG); + PSZ key; + PBVAL jsp, objp, top; + PBVAL jvp = bnx.MakeValue(args, 0, false, &top); + + jsp = jvp; + + if (bnx.CheckPath(g, args, jsp, jvp, 2)) + PUSH_WARNING(g->Message); + else if (jvp && jvp->Type == TYPE_JOB) { + key = bnx.MakeKey(args, 1); + objp = jvp; + chg = bnx.DeleteKey(objp, key); + bnx.SetChanged(chg); + str = bnx.MakeResult(args, top); + } else { + PUSH_WARNING("First argument target is not an object"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jvp + + } // endif CheckMemory + + // In case of error or file, return unchanged argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_object_delete + +void bson_object_delete_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_delete_deinit + +/*********************************************************************************/ +/* Returns an array of the Json object keys. */ +/*********************************************************************************/ +my_bool bson_object_list_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count != 1) { + strcpy(message, "This function must have 1 argument"); + return true; + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "Argument must be a json item"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bson_object_list_init + +char *bson_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->N) { + if (!CheckMemory(g, initid, args, 1, true, true)) { + BJNX bnx(g); + PBVAL jarp; + PBVAL jsp = bnx.MakeValue(args, 0, true); + + if (jsp->Type == TYPE_JOB) { + jarp = bnx.GetKeyList(jsp); + + if (!(str = bnx.Serialize(g, jarp, NULL, 0))) + PUSH_WARNING(g->Message); + + } else { + PUSH_WARNING("First argument is not an object"); + if (g->Mrr) *error = 1; + } // endif jvp + + } // endif CheckMemory + + if (initid->const_item) { + // Keep result of constant function + g->Xchk = str; + g->N = 1; // str can be NULL + } // endif const_item + + } else + str = (char*)g->Xchk; + + if (!str) { + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_object_list + +void bson_object_list_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_list_deinit + +/*********************************************************************************/ +/* Returns an array of the Json object values. 
*/
+/*********************************************************************************/
+my_bool bson_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+  unsigned long reslen, memlen;
+
+  if (args->arg_count != 1) {
+    strcpy(message, "This function must have 1 argument");
+    return true;
+  } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+    strcpy(message, "Argument must be a json object");
+    return true;
+  } else
+    CalcLen(args, false, reslen, memlen);
+
+  return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bson_object_values_init
+
+char *bson_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
+                         unsigned long *res_length, char *is_null, char *error)
+{
+  char   *str = NULL;
+  PGLOBAL g = (PGLOBAL)initid->ptr;
+
+  if (!g->N) {
+    if (!CheckMemory(g, initid, args, 1, true, true)) {
+      BJNX  bnx(g);
+      char *p;
+      PBVAL jsp, jarp;
+      PBVAL jvp = bnx.MakeValue(args, 0);
+
+      if ((p = bnx.GetString(jvp))) {
+        if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+          PUSH_WARNING(g->Message);
+          return NULL;
+        } // endif jsp
+
+      } else
+        jsp = jvp;
+
+      if (jsp->Type == TYPE_JOB) {
+        jarp = bnx.GetObjectValList(jsp);
+
+        if (!(str = bnx.Serialize(g, jarp, NULL, 0)))
+          PUSH_WARNING(g->Message);
+
+      } else {
+        PUSH_WARNING("First argument is not an object");
+        if (g->Mrr) *error = 1;
+      } // endif jvp
+
+    } // endif CheckMemory
+
+    if (initid->const_item) {
+      // Keep result of constant function
+      g->Xchk = str;
+      g->N = 1;                     // str can be NULL
+    } // endif const_item
+
+  } else
+    str = (char*)g->Xchk;
+
+  if (!str) {
+    *is_null = 1;
+    *res_length = 0;
+  } else
+    *res_length = strlen(str);
+
+  return str;
+} // end of bson_object_values
+
+void bson_object_values_deinit(UDF_INIT* initid)
+{
+  JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_values_deinit
+
+/*********************************************************************************/
+/* Set the value of JsonDefPrec (the default decimal precision).                 */
+/*********************************************************************************/
+my_bool bsonset_def_prec_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+  if (args->arg_count != 1 || args->arg_type[0] != INT_RESULT) {
+    strcpy(message, "This function must have 1 integer argument");
+    return true;
+  } else
+    return false;
+
+} // end of bsonset_def_prec_init
+
+long long bsonset_def_prec(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
+{
+  long long n = *(long long*)args->args[0];
+
+  JsonDefPrec = (int)n;
+  return (long long)GetJsonDefPrec();
+} // end of bsonset_def_prec
+
+/*********************************************************************************/
+/* Get the value of JsonDefPrec.                                                 */
+/*********************************************************************************/
+my_bool bsonget_def_prec_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+  if (args->arg_count != 0) {
+    strcpy(message, "This function must have no arguments");
+    return true;
+  } else
+    return false;
+
+} // end of bsonget_def_prec_init
+
+long long bsonget_def_prec(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
+{
+  return (long long)GetJsonDefPrec();
+} // end of bsonget_def_prec
+
+/*********************************************************************************/
+/* Set the value of JsonGrpSize.
*/ +/*********************************************************************************/ +my_bool bsonset_grp_size_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + if (args->arg_count != 1 || args->arg_type[0] != INT_RESULT) { + strcpy(message, "This function must have 1 integer argument"); + return true; + } else + return false; + +} // end of bsonset_grp_size_init + +long long bsonset_grp_size(UDF_INIT *initid, UDF_ARGS *args, char *, char *) +{ + long long n = *(long long*)args->args[0]; + + JsonGrpSize = (uint)n; + return (long long)GetJsonGroupSize(); +} // end of bsonset_grp_size + +/*********************************************************************************/ +/* Get the value of JsonGrpSize. */ +/*********************************************************************************/ +my_bool bsonget_grp_size_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + if (args->arg_count != 0) { + strcpy(message, "This function must have no arguments"); + return true; + } else + return false; + +} // end of bsonget_grp_size_init + +long long bsonget_grp_size(UDF_INIT *initid, UDF_ARGS *args, char *, char *) +{ + return (long long)GetJsonGroupSize(); +} // end of bsonget_grp_size + +/*********************************************************************************/ +/* Make a Json array from values coming from rows. */ +/*********************************************************************************/ +my_bool bson_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, n = GetJsonGroupSize(); + + if (args->arg_count != 1) { + strcpy(message, "This function can only accept 1 argument"); + return true; + } else if (IsArgJson(args, 0) == 3) { + strcpy(message, "This function does not support Jbin arguments"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + reslen *= n; + memlen += ((memlen - MEMFIX) * (n - 1)); + + if (JsonInit(initid, args, message, false, reslen, memlen)) + return true; + + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = new(g) BJNX(g); + + JsonMemSave(g); + return false; +} // end of bson_array_grp_init + +void bson_array_grp_clear(UDF_INIT *initid, char*, char*) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + + JsonSubSet(g); + g->Activityp = (PACTIVITY)bxp->NewVal(TYPE_JAR); + g->N = GetJsonGroupSize(); +} // end of bson_array_grp_clear + +void bson_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + PBVAL arp = (PBVAL)g->Activityp; + + if (arp && g->N-- > 0) + bxp->AddArrayValue(arp, bxp->MakeValue(args, 0)); + +} // end of bson_array_grp_add + +char *bson_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result, + unsigned long *res_length, char *, char *) +{ + char *str; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + PBVAL arp = (PBVAL)g->Activityp; + + if (g->N < 0) + PUSH_WARNING("Result truncated to json_grp_size values"); + + if (!arp || !(str = bxp->Serialize(g, arp, NULL, 0))) + str = strcpy(result, g->Message); + + *res_length = strlen(str); + return str; +} // end of bson_array_grp + +void bson_array_grp_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_array_grp_deinit + +/*********************************************************************************/ +/* Make a Json object from values coming from rows. 
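The sizing arithmetic in bson_array_grp_init above scales the one-row estimates to json_grp_size rows while counting the fixed pool overhead MEMFIX only once. A worked check with assumed numbers (the real MEMFIX constant and per-row estimates differ):

#include <cstdio>

int main() {
  unsigned long reslen = 256, memlen = 4096;  // per-row estimates (assumed)
  unsigned long MEMFIX = 512;                 // fixed pool overhead (assumed value)
  unsigned long n = 10;                       // JsonGrpSize

  reslen *= n;                                // room for n serialized values
  memlen += (memlen - MEMFIX) * (n - 1);      // n variable parts, one fixed part

  std::printf("reslen=%lu memlen=%lu\n", reslen, memlen);  // 2560 and 36352
  return 0;
}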
*/ +/*********************************************************************************/ +my_bool bson_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, n = GetJsonGroupSize(); + + if (args->arg_count != 2) { + strcpy(message, "This function requires 2 arguments (key, value)"); + return true; + } else if (IsArgJson(args, 0) == 3) { + strcpy(message, "This function does not support Jbin arguments"); + return true; + } else + CalcLen(args, true, reslen, memlen); + + reslen *= n; + memlen += ((memlen - MEMFIX) * (n - 1)); + + if (JsonInit(initid, args, message, false, reslen, memlen)) + return true; + + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = new(g) BJNX(g); + + JsonMemSave(g); + return false; +} // end of bson_object_grp_init + +void bson_object_grp_clear(UDF_INIT *initid, char*, char*) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + + JsonSubSet(g); + g->Activityp = (PACTIVITY)bxp->NewVal(TYPE_JOB); + g->N = GetJsonGroupSize(); +} // end of bson_object_grp_clear + +void bson_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + PBVAL bop = (PBVAL)g->Activityp; + + if (g->N-- > 0) + bxp->SetKeyValue(bop, bxp->MakeValue(args, 1), MakePSZ(g, args, 0)); + +} // end of bson_object_grp_add + +char *bson_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result, + unsigned long *res_length, char *, char *) +{ + char *str; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + PBVAL bop = (PBVAL)g->Activityp; + + if (g->N < 0) + PUSH_WARNING("Result truncated to json_grp_size values"); + + if (!bop || !(str = bxp->Serialize(g, bop, NULL, 0))) + str = strcpy(result, g->Message); + + *res_length = strlen(str); + return str; +} // end of bson_object_grp + +void bson_object_grp_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_grp_deinit + +/*********************************************************************************/ +/* Test BJSON parse and serialize. 
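bson_array_grp and bson_object_grp follow MariaDB's aggregate-UDF calling sequence: a clear call once per group, an add call per row (capped through g->N), and a final call that serializes the accumulated value. A self-contained skeleton of that flow; this models the contract only, not the server API:

#include <cstdio>
#include <string>
#include <vector>

struct Agg {
  std::vector<int> vals;
  int left = 0;                        // counterpart of g->N

  void clear(int grp_size) { vals.clear(); left = grp_size; }
  void add(int v)          { if (left-- > 0) vals.push_back(v); }

  std::string result() {
    if (left < 0)                      // same condition as the warning above
      std::puts("Result truncated to json_grp_size values");
    std::string s = "[";
    for (size_t i = 0; i < vals.size(); i++)
      s += (i ? "," : "") + std::to_string(vals[i]);
    return s + "]";
  }
};

int main() {
  Agg a;
  a.clear(2);                          // group of at most 2 values
  for (int v : {10, 20, 30}) a.add(v); // third row exceeds the cap
  std::printf("%s\n", a.result().c_str());  // warning, then [10,20]
  return 0;
}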
*/ +/*********************************************************************************/ +my_bool bson_test_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen, more = 1000; + + if (args->arg_count == 0) { + strcpy(message, "At least 1 argument required (json)"); + return true; + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bson_test_init + +char* bson_test(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) { + char* str = NULL, * sap = NULL, * fn = NULL; + int pretty = 1; + PBVAL bvp; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + str = (char*)g->Activityp; + goto err; + } else if (initid->const_item) + g->N = 1; + + try { + BJNX bnx(g); + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, !g->Xchk)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else // Sarea may have been reallocated + bnx.Reset(); + + bvp = bnx.MakeValue(args, 0, true); + + if (bvp->Type == TYPE_NULL) { + PUSH_WARNING(g->Message); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + JsonMemSave(g); + } // endif Mrr + + } else + bvp = (PBVAL)g->Xchk; + + for (uint i = 1; i < args->arg_count; i++) + if (args->arg_type[i] == STRING_RESULT) + fn = args->args[i]; + else if (args->arg_type[i] == INT_RESULT) + pretty = (int)*(longlong*)args->args[i]; + + // Serialize the parse tree + str = bnx.Serialize(g, bvp, fn, pretty); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + + } catch (int n) { + xtrc(1, "json_test_bson: error %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + str = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + str = NULL; + } // end catch + +err: + if (!str) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bson_test + +void bson_test_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_test_deinit + +/*********************************************************************************/ +/* Locate a value in a Json tree. 
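bson_test above shows the error-handling pattern reused by the functions that follow: exceptions are converted into a SQL warning plus a NULL result, so a malformed document never takes the server down. A minimal model of that guard (hypothetical names):

#include <cstdio>
#include <cstring>

static const char *guarded(char *msgbuf, bool fail) {
  try {
    if (fail)
      throw "parse error";
    return "ok";
  } catch (const char *msg) {
    std::strcpy(msgbuf, msg);          // like strcpy(g->Message, msg)
    std::puts(msgbuf);                 // like PUSH_WARNING(g->Message)
    return nullptr;                    // caller then sets *is_null = 1
  }
}

int main() {
  char buf[64];
  std::printf("%s\n", guarded(buf, false));         // ok
  std::printf("%p\n", (void *)guarded(buf, true));  // warning, then null
  return 0;
}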
*/
+/*********************************************************************************/
+my_bool bsonlocate_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+  unsigned long reslen, memlen, more = 1000;
+
+  if (args->arg_count < 2) {
+    strcpy(message, "At least 2 arguments required");
+    return true;
+  } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+    strcpy(message, "First argument must be a json item");
+    return true;
+  } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) {
+    strcpy(message, "Third argument is not an integer (rank)");
+    return true;
+  } // endifs args
+
+  CalcLen(args, false, reslen, memlen);
+
+  // TODO: calculate this
+  if (IsArgJson(args, 0) == 3)
+    more = 0;
+
+  return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonlocate_init
+
+char* bsonlocate(UDF_INIT* initid, UDF_ARGS* args, char* result,
+                 unsigned long* res_length, char* is_null, char* error) {
+  char   *path = NULL;
+  int     k;
+  PBVAL   bvp, bvp2;
+  PGLOBAL g = (PGLOBAL)initid->ptr;
+
+  if (g->N) {
+    if (g->Activityp) {
+      path = (char*)g->Activityp;
+      *res_length = strlen(path);
+      return path;
+    } else {
+      *res_length = 0;
+      *is_null = 1;
+      return NULL;
+    } // endif Activityp
+
+  } else if (initid->const_item)
+    g->N = 1;
+
+  try {
+    BJNX bnx(g);
+
+    if (!g->Xchk) {
+      if (CheckMemory(g, initid, args, 1, !g->Xchk)) {
+        PUSH_WARNING("CheckMemory error");
+        *error = 1;
+        goto err;
+      } else {
+        bnx.Reset();               // Sarea may have been re-allocated
+        bvp = bnx.MakeValue(args, 0, true);
+
+        if (!bvp) {
+          bnx.GetMsg(g);
+          PUSH_WARNING(g->Message);
+          goto err;
+        } else if (bvp->Type == TYPE_NULL) {
+          PUSH_WARNING("First argument is not a valid JSON item");
+          goto err;
+        } // endif bvp
+
+        if (g->Mrr) {              // First argument is a constant
+          g->Xchk = bvp;
+          JsonMemSave(g);
+        } // endif Mrr
+
+      } // endif CheckMemory
+
+    } else
+      bvp = (PBVAL)g->Xchk;
+
+    // The item to locate
+    bvp2 = bnx.MakeValue(args, 1, true);
+
+    if (bvp2->Type == TYPE_NULL) {
+      PUSH_WARNING("Invalid second argument");
+      goto err;
+    } // endif bvp
+
+    k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1;
+
+//  bnxp = new(g) BJNX(g, bvp, TYPE_STRING);
+    path = bnx.Locate(g, bvp, bvp2, k);
+
+    if (initid->const_item)
+      // Keep result of constant function
+      g->Activityp = (PACTIVITY)path;
+
+  } catch (int n) {
+    xtrc(1, "Exception %d: %s\n", n, g->Message);
+    PUSH_WARNING(g->Message);
+    *error = 1;
+    path = NULL;
+  } catch (const char* msg) {
+    strcpy(g->Message, msg);
+    PUSH_WARNING(g->Message);
+    *error = 1;
+    path = NULL;
+  } // end catch
+
+err:
+  if (!path) {
+    *res_length = 0;
+    *is_null = 1;
+  } else
+    *res_length = strlen(path);
+
+  return path;
+} // end of bsonlocate
+
+void bsonlocate_deinit(UDF_INIT* initid) {
+  JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonlocate_deinit
+
+/*********************************************************************************/
+/* Locate all occurrences of a value in a Json tree.
*/ +/*********************************************************************************/ +my_bool bson_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen, more = 1000; + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third argument is not an integer (Depth)"); + return true; + } // endifs + + CalcLen(args, false, reslen, memlen); + + // TODO: calculate this + if (IsArgJson(args, 0) == 3) + more = 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bson_locate_all_init + +char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) { + char* path = NULL; + int mx = 10; + PBVAL bvp, bvp2; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (g->Activityp) { + path = (char*)g->Activityp; + *res_length = strlen(path); + return path; + } else { + *error = 1; + *res_length = 0; + *is_null = 1; + return NULL; + } // endif Activityp + + } else if (initid->const_item) + g->N = 1; + + try { + BJNX bnx(g); + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + bnx.Reset(); + + bvp = bnx.MakeValue(args, 0, true); + + if (bvp->Type == TYPE_NULL) { + PUSH_WARNING("First argument is not a valid JSON item"); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + JsonMemSave(g); + } // endif Mrr + + } else + bvp = (PBVAL)g->Xchk; + + // The item to locate + bvp2 = bnx.MakeValue(args, 1, true); + + if (bvp2->Type == TYPE_NULL) { + PUSH_WARNING("Invalid second argument"); + goto err; + } // endif bvp + + if (args->arg_count > 2) + mx = (int)*(long long*)args->args[2]; + +// bnxp = new(g) BJNX(g, bvp, TYPE_STRING); + path = bnx.LocateAll(g, bvp, bvp2, mx); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)path; + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch + +err: + if (!path) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(path); + + return path; +} // end of bson_locate_all + +void bson_locate_all_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_locate_all_deinit + +/*********************************************************************************/ +/* Check whether the document contains a value or item. 
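Both locate functions reduce to a depth-first walk that records a JSON-path-like string for each match. A toy analogue over a tree of ints; the real Locate/LocateAll traverse BVAL arrays and objects and honor the rank/depth argument:

#include <cstdio>
#include <string>
#include <vector>

struct T { int val; std::vector<T> kids; };

static void locate_all(const T &t, int target, std::string path,
                       std::vector<std::string> &out) {
  if (t.val == target)
    out.push_back(path.empty() ? "$" : path);
  for (size_t i = 0; i < t.kids.size(); i++)   // array steps become [i] segments
    locate_all(t.kids[i], target, path + "[" + std::to_string(i) + "]", out);
}

int main() {
  T doc{1, {{7, {}}, {2, {{7, {}}}}}};
  std::vector<std::string> paths;
  locate_all(doc, 7, "", paths);
  for (auto &p : paths)
    std::printf("%s\n", p.c_str());            // prints [0] and [1][0]
  return 0;
}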
*/
+/*********************************************************************************/
+my_bool bson_contains_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+  unsigned long reslen, memlen, more = 1024;
+  int n = IsArgJson(args, 0);
+
+  if (args->arg_count < 2) {
+    strcpy(message, "At least 2 arguments required");
+    return true;
+  } else if (!n && args->arg_type[0] != STRING_RESULT) {
+    strcpy(message, "First argument must be a json item");
+    return true;
+  } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) {
+    strcpy(message, "Third argument is not an integer (index)");
+    return true;
+  } else if (args->arg_count > 3) {
+    if (args->arg_type[3] == INT_RESULT && args->args[3])
+      more += (unsigned long)*(long long*)args->args[3];
+    else
+      strcpy(message, "Fourth argument is not an integer (memory)");
+
+  } // endif's
+
+  CalcLen(args, false, reslen, memlen);
+//memlen += more;
+
+  // TODO: calculate this
+  more += (IsArgJson(args, 0) != 3 ? 1000 : 0);
+
+  return JsonInit(initid, args, message, false, reslen, memlen, more);
+} // end of bson_contains_init
+
+long long bson_contains(UDF_INIT *initid, UDF_ARGS *args, char *, char *error)
+{
+  char          isn, res[256];
+  unsigned long reslen;
+
+  isn = 0;
+  bsonlocate(initid, args, res, &reslen, &isn, error);
+  return (isn) ? 0LL : 1LL;
+} // end of bson_contains
+
+void bson_contains_deinit(UDF_INIT* initid)
+{
+  JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_contains_deinit
+
+/*********************************************************************************/
+/* Check whether the document contains a path.                                   */
+/*********************************************************************************/
+my_bool bsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+  unsigned long reslen, memlen, more = 1024;
+  int n = IsArgJson(args, 0);
+
+  if (args->arg_count < 2) {
+    strcpy(message, "At least 2 arguments required");
+    return true;
+  } else if (!n && args->arg_type[0] != STRING_RESULT) {
+    strcpy(message, "First argument must be a json item");
+    return true;
+  } else if (args->arg_type[1] != STRING_RESULT) {
+    strcpy(message, "Second argument is not a string (path)");
+    return true;
+  } else if (args->arg_count > 2) {
+    if (args->arg_type[2] == INT_RESULT && args->args[2])
+      more += (unsigned long)*(long long*)args->args[2];
+    else
+      strcpy(message, "Third argument is not an integer (memory)");
+
+  } // endif's
+
+  CalcLen(args, false, reslen, memlen);
+//memlen += more;
+
+  // TODO: calculate this
+  more += (IsArgJson(args, 0) != 3 ?
1000 : 0); + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bsoncontains_path_init + +long long bsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *, char *error) +{ + char *p, *path; + long long n; + PBVAL jsp; + PBVAL jvp; + PBJNX bxp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (!g->Activityp) { + return 0LL; + } else + return *(long long*)g->Activityp; + + } else if (initid->const_item) + g->N = 1; + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + goto err; + } else { + BJNX bnx(g); + + jvp = bnx.MakeValue(args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp + + } else + jsp = jvp; + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + + } // endelse CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + bxp = new(g) BJNX(g, jsp, TYPE_BIGINT); + path = MakePSZ(g, args, 1); + + if (bxp->SetJpath(g, path)) { + PUSH_WARNING(g->Message); + goto err; + } // endif SetJpath + + n = (bxp->CheckPath(g)) ? 1LL : 0LL; + + if (initid->const_item) { + // Keep result of constant function + long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long)); + + if (np) { + *np = n; + g->Activityp = (PACTIVITY)np; + } else + PUSH_WARNING(g->Message); + + } // endif const_item + + return n; + +err: + if (g->Mrr) *error = 1; + return 0LL; +} // end of bsoncontains_path + +void bsoncontains_path_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bsoncontains_path_deinit + +/*********************************************************************************/ +/* Merge two arrays or objects. */ +/*********************************************************************************/ +my_bool bson_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else for (int i = 0; i < 2; i++) + if (!IsArgJson(args, i) && args->arg_type[i] != STRING_RESULT) { + sprintf(message, "Argument %d must be a json item", i); + return true; + } // endif type + + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_item_merge_init + +char *bson_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 2, false, false, true)) { + JTYP type; + BJNX bnx(g); + PBVAL jvp, top = NULL; + PBVAL jsp[2] = {NULL, NULL}; + + for (int i = 0; i < 2; i++) { + jvp = bnx.MakeValue(args, i, true); + + if (i) { + if (jvp->Type != type) { + PUSH_WARNING("Argument types mismatch"); + goto fin; + } // endif type + + } else { + type = (JTYP)jvp->Type; + + if (type != TYPE_JAR && type != TYPE_JOB) { + PUSH_WARNING("First argument is not an array or object"); + goto fin; + } else + top = jvp; + + } // endif i + + jsp[i] = jvp; + } // endfor i + + if (type == TYPE_JAR) + bnx.MergeArray(jsp[0], jsp[1]); + else + bnx.MergeObject(jsp[0], jsp[1]); + + bnx.SetChanged(true); + str = bnx.MakeResult(args, top); + } // endif CheckMemory + + // In case of error or file, return unchanged first argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *res_length = 0; + *error = 1; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bson_item_merge + +void bson_item_merge_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_item_merge_deinit + +/*********************************************************************************/ +/* Get a Json item from a Json document. 
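The two merge shapes accepted by bson_item_merge can be summarized as: arrays concatenate, objects adopt the second operand's value when keys clash. That is my reading of MergeArray/MergeObject, which are defined elsewhere in the CONNECT sources; a sketch under that assumption:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main() {
  std::vector<int> a1{1, 2}, a2{3};
  a1.insert(a1.end(), a2.begin(), a2.end());      // array merge: [1,2,3]

  std::map<std::string, int> o1{{"a", 1}, {"b", 2}}, o2{{"b", 9}};
  for (auto &kv : o2)
    o1[kv.first] = kv.second;                     // object merge: "b" replaced

  std::printf("a1.size=%zu o1[b]=%d\n", a1.size(), o1["b"]);  // 3 and 9
  return 0;
}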
*/ +/*********************************************************************************/ +my_bool bson_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more; + int n = IsArgJson(args, 0); + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else if (!n && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument is not a string (jpath)"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + if (n == 2 && args->args[0]) { + char fn[_MAX_PATH]; + long fl; + + memcpy(fn, args->args[0], args->lengths[0]); + fn[args->lengths[0]] = 0; + fl = GetFileLength(fn); + more = fl * 3; + } else if (n != 3) { + more = args->lengths[0] * 3; + } else + more = 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bson_get_item_init + +char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *) +{ + char *path, *str = NULL; + PBVAL jvp; + PGLOBAL g = (PGLOBAL)initid->ptr; + BJNX bnx(g, NULL, TYPE_STRING, initid->max_length); + + if (g->N) { + str = (char*)g->Activityp; + goto fin; + } else if (initid->const_item) + g->N = 1; + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true, true)) { + PUSH_WARNING("CheckMemory error"); + goto fin; + } else { + bnx.Reset(); + jvp = bnx.MakeValue(args, 0, true); + + if (g->Mrr) { // First argument is a constant + g->Xchk = jvp; + JsonMemSave(g); + } // endif Mrr + + } // endelse CheckMemory + + } else + jvp = (PBVAL)g->Xchk; + + path = MakePSZ(g, args, 1); + + if (bnx.SetJpath(g, path, true)) { + goto fin; + } else + jvp = bnx.GetRowValue(g, jvp, 0); + + if (!bnx.IsJson(jvp)) { + strcpy(g->Message, "Not a Json item"); + } else + str = bnx.Serialize(g, jvp, NULL, 0); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + +fin: + if (!str) { + PUSH_WARNING(g->Message); + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_get_item + +void bson_get_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_get_item_deinit + +/*********************************************************************************/ +/* Get a string value from a Json item. 
*/ +/*********************************************************************************/ +my_bool bsonget_string_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more = 1024; + int n = IsArgJson(args, 0); + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!n && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument is not a string (jpath)"); + return true; + } else if (args->arg_count > 2) { + if (args->arg_type[2] == INT_RESULT && args->args[2]) + more += (unsigned long)*(long long*)args->args[2]; + else + strcpy(message, "Third argument is not an integer (memory)"); + + } // endif's + + CalcLen(args, false, reslen, memlen); + //memlen += more; + + if (n == 2 && args->args[0]) { + char fn[_MAX_PATH]; + long fl; + + memcpy(fn, args->args[0], args->lengths[0]); + fn[args->lengths[0]] = 0; + fl = GetFileLength(fn); + more += fl * 3; + } else if (n != 3) + more += args->lengths[0] * 3; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bsonget_string_init + +char *bsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *) +{ + char *p, *path, *str = NULL; + PBVAL jsp, jvp; + PBJNX bxp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + str = (char*)g->Activityp; + goto err; + } else if (initid->const_item) + g->N = 1; + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + goto err; + } else { + BJNX bnx(g); + + jvp = bnx.MakeValue(args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp + + } else + jsp = jvp; + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + + } // endelse CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + path = MakePSZ(g, args, 1); + bxp = new(g) BJNX(g, jsp, TYPE_STRING, initid->max_length); + + if (bxp->SetJpath(g, path)) { + PUSH_WARNING(g->Message); + goto err; + } else + bxp->ReadValue(g); + + if (!bxp->GetValue()->IsNull()) + str = bxp->GetValue()->GetCharValue(); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + + PUSH_WARNING(g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + str = NULL; + } // end catch + +err: + if (!str) { + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bsonget_string + +void bsonget_string_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bsonget_string_deinit + +/*********************************************************************************/ +/* Get an integer value from a Json item. 
*/ +/*********************************************************************************/ +my_bool bsonget_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more; + + if (args->arg_count != 2) { + strcpy(message, "This function must have 2 arguments"); + return true; + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument is not a (jpath) string"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + // TODO: calculate this + more = (IsArgJson(args, 0) != 3) ? 1000 : 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bsonget_int_init + +long long bsonget_int(UDF_INIT *initid, UDF_ARGS *args, + char *is_null, char *error) +{ + char *p, *path; + long long n; + PBVAL jsp, jvp; + PBJNX bxp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (!g->Activityp) { + *is_null = 1; + return 0LL; + } else + return *(long long*)g->Activityp; + + } else if (initid->const_item) + g->N = 1; + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + if (g->Mrr) *error = 1; + *is_null = 1; + return 0LL; + } else { + BJNX bnx(g); + + jvp = bnx.MakeValue(args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + if (g->Mrr) *error = 1; + *is_null = 1; + return 0; + } // endif jsp + + } else + jsp = jvp; + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + + } // endelse CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + path = MakePSZ(g, args, 1); + bxp = new(g) BJNX(g, jsp, TYPE_BIGINT); + + if (bxp->SetJpath(g, path)) { + PUSH_WARNING(g->Message); + *is_null = 1; + return 0; + } else + bxp->ReadValue(g); + + if (bxp->GetValue()->IsNull()) { + *is_null = 1; + return 0; + } // endif IsNull + + n = bxp->GetValue()->GetBigintValue(); + + if (initid->const_item) { + // Keep result of constant function + long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long)); + + if (np) { + *np = n; + g->Activityp = (PACTIVITY)np; + } else + PUSH_WARNING(g->Message); + + } // endif const_item + + return n; +} // end of bsonget_int + +void bsonget_int_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bsonget_int_deinit + +/*********************************************************************************/ +/* Get a double value from a Json item. 
*/
+/*********************************************************************************/
+my_bool bsonget_real_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+  unsigned long reslen, memlen, more;
+
+  if (args->arg_count < 2) {
+    strcpy(message, "At least 2 arguments required");
+    return true;
+  } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+    strcpy(message, "First argument must be a json item");
+    return true;
+  } else if (args->arg_type[1] != STRING_RESULT) {
+    strcpy(message, "Second argument is not a (jpath) string");
+    return true;
+  } else if (args->arg_count > 2) {
+    if (args->arg_type[2] != INT_RESULT) {
+      strcpy(message, "Third argument is not an integer (decimals)");
+      return true;
+    } else
+      initid->decimals = (uint)*(longlong*)args->args[2];
+
+  } else
+    initid->decimals = 15;
+
+  CalcLen(args, false, reslen, memlen);
+
+  // TODO: calculate this
+  more = (IsArgJson(args, 0) != 3) ? 1000 : 0;
+
+  return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonget_real_init
+
+double bsonget_real(UDF_INIT *initid, UDF_ARGS *args,
+                    char *is_null, char *error)
+{
+  char   *p, *path;
+  double  d;
+  PBVAL   jsp, jvp;
+  PGLOBAL g = (PGLOBAL)initid->ptr;
+  BJNX    bnx(g);
+
+  if (g->N) {
+    if (!g->Activityp) {
+      *is_null = 1;
+      return 0.0;
+    } else
+      return *(double*)g->Activityp;
+
+  } else if (initid->const_item)
+    g->N = 1;
+
+  if (!g->Xchk) {
+    if (CheckMemory(g, initid, args, 1, true)) {
+      PUSH_WARNING("CheckMemory error");
+      if (g->Mrr) *error = 1;
+      *is_null = 1;
+      return 0.0;
+    } else {
+      bnx.Reset();
+      jvp = bnx.MakeValue(args, 0);
+
+      if ((p = bnx.GetString(jvp))) {
+        if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+          PUSH_WARNING(g->Message);
+          *is_null = 1;
+          return 0.0;
+        } // endif jsp
+
+      } else
+        jsp = jvp;
+
+      if (g->Mrr) {            // First argument is a constant
+        g->Xchk = jsp;
+        JsonMemSave(g);
+      } // endif Mrr
+    } // endelse CheckMemory
+
+  } else
+    jsp = (PBVAL)g->Xchk;
+
+  path = MakePSZ(g, args, 1);
+//bxp = new(g) BJNX(g, jsp, TYPE_DOUBLE, 32, jsp->Nd);
+
+  if (bnx.SetJpath(g, path)) {
+    PUSH_WARNING(g->Message);
+    *is_null = 1;
+    return 0.0;
+  } else
+    jvp = bnx.GetRowValue(g, jsp, 0);
+
+  if (!jvp || bnx.IsValueNull(jvp)) {
+    *is_null = 1;
+    return 0.0;
+  } else if (args->arg_count == 2) {
+    d = atof(bnx.GetString(jvp));
+  } else
+    d = bnx.GetDouble(jvp);
+
+  if (initid->const_item) {
+    // Keep result of constant function
+    double *dp;
+
+    if ((dp = (double*)PlgDBSubAlloc(g, NULL, sizeof(double)))) {
+      *dp = d;
+      g->Activityp = (PACTIVITY)dp;
+    } else {
+      PUSH_WARNING(g->Message);
+      *is_null = 1;
+      return 0.0;
+    } // endif dp
+
+  } // endif const_item
+
+  return d;
+} // end of bsonget_real
+
+void bsonget_real_deinit(UDF_INIT* initid)
+{
+  JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonget_real_deinit
+
+/*********************************************************************************/
+/* Delete items from a Json document.
*/ +/*********************************************************************************/ +my_bool bson_delete_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + if (IsArgJson(args, 0) != 3) { + strcpy(message, "This function must have at least 2 arguments or one binary"); + return true; + } // endif args + + } // endif count + + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // Is this a constant function + g->N = (initid->const_item) ? 1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_delete_item_init + +char *bson_delete_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *path, *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 1, false, false, true)) { + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL top, jar = NULL; + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); + + if (args->arg_count == 1) { + // This should be coming from bbin_locate_all + jar = jvp; // This is the array of paths + jvp = top; // And this is the document + } else if(!bnx.IsJson(jvp)) { + PUSH_WARNING("First argument is not a JSON document"); + goto fin; + } else if (args->arg_count == 2) { + // Check whether this is an array of paths + jar = bnx.MakeValue(args, 1, true); + + if (jar && jar->Type != TYPE_JAR) + jar = NULL; + + } // endif arg_count + + if (jar) { + // Do the deletion in reverse order + for(int i = bnx.GetArraySize(jar) - 1; i >= 0; i--) { + path = bnx.GetString(bnx.GetArrayValue(jar, i)); + + if (bnx.SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + bnx.SetChanged(bnx.DeleteItem(g, jvp)); + } // endfor i + + } else for (uint i = 1; i < args->arg_count; i++) { + path = MakePSZ(g, args, i); + + if (bnx.SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + bnx.SetChanged(bnx.DeleteItem(g, jvp)); + } // endfor i + + str = bnx.MakeResult(args, top, INT_MAX); + } // endif CheckMemory + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_delete_item + +void bson_delete_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_delete_item_deinit + +/*********************************************************************************/ +/* This function is used by the json_set/insert/update_item functions. 
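A note on the "deletion in reverse order" loop in bson_delete_item above: erasing array elements by ascending index shifts every later element down, invalidating the indexes still to be processed, while descending order leaves them stable. The same effect, shown with std::vector:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> v{10, 20, 30, 40};
  std::vector<size_t> doomed{1, 3};           // want to drop 20 and 40

  for (size_t i = doomed.size(); i-- > 0; )   // reverse order, as above
    v.erase(v.begin() + doomed[i]);

  for (int x : v)
    std::printf("%d ", x);                    // prints 10 30
  std::printf("\n");
  return 0;
}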
*/ +/*********************************************************************************/ +static char *bson_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *path, *str = NULL; + int w; + my_bool b = true; + PBJNX bxp; + PBVAL jsp, jvp; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Alchecked) { + str = (char*)g->Activityp; + goto fin; + } else if (g->N) + g->Alchecked = 1; + + if (!strcmp(result, "$set")) + w = 0; + else if (!strcmp(result, "$insert")) + w = 1; + else if (!strcmp(result, "$update")) + w = 2; + else { + PUSH_WARNING("Logical error, please contact CONNECT developer"); + goto fin; + } // endelse + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true, false, true)) { + PUSH_WARNING("CheckMemory error"); + throw 1; + } else { + BJNX bnx(g); + + jsp = bnx.MakeValue(args, 0, true); + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + + } // endif CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + bxp = new(g)BJNX(g, jsp, TYPE_STRING, initid->max_length, 0, true); + + for (uint i = 1; i + 1 < args->arg_count; i += 2) { + jvp = bxp->MakeValue(args, i); + path = MakePSZ(g, args, i + 1); + + if (bxp->SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + if (w) { + bxp->ReadValue(g); + b = bxp->GetValue()->IsNull(); + b = (w == 1) ? b : !b; + } // endif w + + if (b && bxp->WriteValue(g, jvp)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + bxp->SetChanged(true); + } // endfor i + + // In case of error or file, return unchanged argument + if (!(str = bxp->MakeResult(args, jsp, INT_MAX32))) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + + PUSH_WARNING(g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + str = NULL; + } // end catch + +fin: + if (!str) { + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_handle_item + +/*********************************************************************************/ +/* Set Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bson_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more = 0; + int n = IsArgJson(args, 0); + + if (!(args->arg_count % 2)) { + strcpy(message, "This function must have an odd number of arguments"); + return true; + } else if (!n && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + if (n == 2 && args->args[0]) { + char fn[_MAX_PATH]; + long fl; + + memcpy(fn, args->args[0], args->lengths[0]); + fn[args->lengths[0]] = 0; + fl = GetFileLength(fn); + more += fl * 3; + } else if (n != 3) + more += args->lengths[0] * 3; + + if (!JsonInit(initid, args, message, true, reslen, memlen, more)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + g->Alchecked = 0; + return false; + } else + return true; + +} // end of bson_set_item_init + +char *bson_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$set"); + return bson_handle_item(initid, args, result, res_length, is_null, p); +} // end of bson_set_item + +void bson_set_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_set_item_deinit + +/*********************************************************************************/ +/* Insert Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bson_insert_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bson_insert_item_init + +char *bson_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$insert"); + return bson_handle_item(initid, args, result, res_length, is_null, p); +} // end of bson_insert_item + +void bson_insert_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_insert_item_deinit + +/*********************************************************************************/ +/* Update Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bson_update_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bson_update_item_init + +char *bson_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$update"); + return bson_handle_item(initid, args, result, res_length, is_null, p); +} // end of bson_update_item + +void bson_update_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_update_item_deinit + +/*********************************************************************************/ +/* Returns a json file as a json string. */ +/*********************************************************************************/ +my_bool bson_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, fl, more = 1024; + + if (args->arg_count < 1 || args->arg_count > 4) { + strcpy(message, "This function only accepts 1 to 4 arguments"); + return true; + } else if (args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a string (file name)"); + return true; + } // endif's args[0] + + for (unsigned int i = 1; i < args->arg_count; i++) { + if (!(args->arg_type[i] == INT_RESULT || args->arg_type[i] == STRING_RESULT)) { + sprintf(message, "Argument %d is not an integer or a string (pretty or path)", i); + return true; + } // endif arg_type + + // Take care of eventual memory argument + if (args->arg_type[i] == INT_RESULT && args->args[i]) + more += (ulong)*(longlong*)args->args[i]; + + } // endfor i + + initid->maybe_null = 1; + CalcLen(args, false, reslen, memlen); + + if (args->args[0]) + fl = GetFileLength(args->args[0]); + else + fl = 100; // What can be done here? 
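+    // Note (assumption): the file name is not a constant here, so its
+    // length cannot be determined at init time and an arbitrary default
+    // is used to size the work area.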
+ + reslen += fl; + + if (initid->const_item) + more += fl; + + if (args->arg_count > 1) + more += fl * M; + + memlen += more; + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bson_file_init + +char *bson_file(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *fn, *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + str = (char*)g->Xchk; + goto fin; + } else if (initid->const_item) + g->N = 1; + + PlugSubSet(g->Sarea, g->Sarea_Size); + fn = MakePSZ(g, args, 0); + + if (args->arg_count > 1) { + int pretty = 3, pty = 3; + size_t len; + PBVAL jsp, jvp = NULL; + BJNX bnx(g); + + for (unsigned int i = 1; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) { + pretty = (int) * (longlong*)args->args[i]; + break; + } // endif type + + // Parse the json file and allocate its tree structure + if (!(jsp = bnx.ParseJsonFile(g, fn, pty, len))) { + PUSH_WARNING(g->Message); + goto fin; + } // endif jsp + + if (pty == 3) + PUSH_WARNING("File pretty format cannot be determined"); + else if (pretty != 3 && pty != pretty) + PUSH_WARNING("File pretty format doesn't match the specified pretty value"); + else if (pretty == 3) + pretty = pty; + + // Check whether a path was specified + if (bnx.CheckPath(g, args, jsp, jvp, 1)) { + PUSH_WARNING(g->Message); + goto fin; + } else if (jvp) + jsp = jvp; + + if (!(str = bnx.Serialize(g, jsp, NULL, 0))) + PUSH_WARNING(g->Message); + + } else + if (!(str = GetJsonFile(g, fn))) + PUSH_WARNING(g->Message); + + if (initid->const_item) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bson_file + +void bson_file_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_file_deinit + +/*********************************************************************************/ +/* Make a json file from a json item. 
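+
+   The item is serialized to the file named by a string argument (or taken
+   from a Jbin argument) and the file name is returned. Illustrative call
+   (not from the original sources; file name hypothetical, the last
+   argument being the pretty level):
+
+     SELECT bfile_make('{"a":1,"b":2}', '/tmp/ab.json', 2);
+     -- expected to return: /tmp/ab.json
+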
*/ +/*********************************************************************************/ +my_bool bfile_make_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 1 || args->arg_count > 3) { + strcpy(message, "Wrong number of arguments"); + return true; + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } // endif + + CalcLen(args, false, reslen, memlen); + memlen = memlen + 5000; // To take care of not pretty files + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bfile_make_init + +char *bfile_make(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *) +{ + char *p, *str = NULL, *fn = NULL; + int n, pretty = 2; + PBVAL jsp, jvp; + PGLOBAL g = (PGLOBAL)initid->ptr; + BJNX bnx(g); + + if (g->N) { + str = (char*)g->Activityp; + goto fin; + } else if (initid->const_item) + g->N = 1; + + if ((n = IsArgJson(args, 0)) == 3) { + // Get default file name and pretty + PBSON bsp = (PBSON)args->args[0]; + + fn = bsp->Filename; + pretty = bsp->Pretty; + } else if ((n = IsArgJson(args, 0)) == 2) + fn = args->args[0]; + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + goto fin; + } else + bnx.Reset(); + + jvp = bnx.MakeValue(args, 0); + + if (!n && (p = bnx.GetString(jvp))) { + if (!strchr("[{ \t\r\n", *p)) { + // Is this a file name? + if (!(p = GetJsonFile(g, p))) { + PUSH_WARNING(g->Message); + goto fin; + } else + fn = bnx.GetString(jvp); + + } // endif p + + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto fin; + } // endif jsp + + bnx.SetValueVal(jvp, jsp); + } // endif p + + if (g->Mrr) { // First argument is a constant + g->Xchk = jvp; + JsonMemSave(g); + } // endif Mrr + + } else + jvp = (PBVAL)g->Xchk; + + for (uint i = 1; i < args->arg_count; i++) + switch (args->arg_type[i]) { + case STRING_RESULT: + fn = MakePSZ(g, args, i); + break; + case INT_RESULT: + pretty = (int)*(longlong*)args->args[i]; + break; + default: + PUSH_WARNING("Unexpected argument type in bfile_make"); + } // endswitch arg_type + + if (fn) { + if (!bnx.Serialize(g, jvp, fn, pretty)) + PUSH_WARNING(g->Message); + } else + PUSH_WARNING("Missing file name"); + + str = fn; + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + +fin: + if (!str) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bfile_make + +void bfile_make_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bfile_make_deinit + +/*********************************************************************************/ +/* Convert a prettiest Json file to Pretty=0. 
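+
+   Rewrites a pretty JSON file as a pretty=0 (one record per line) file.
+   Illustrative call (not from the original sources; file names
+   hypothetical, the third argument being the output record length LRECL):
+
+     SELECT bfile_convert('/tmp/books.json', '/tmp/books0.json', 1024);
+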
*/ +/*********************************************************************************/ +my_bool bfile_convert_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen; + + if (args->arg_count != 3) { + strcpy(message, "This function must have 3 arguments"); + return true; + } else if (args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third Argument must be an integer (LRECL)"); + return true; + } else for (int i = 0; i < 2; i++) + if (args->arg_type[i] != STRING_RESULT) { + sprintf(message, "Arguments %d must be a string (file name)", i+1); + return true; + } // endif args + + CalcLen(args, false, reslen, memlen); + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bfile_convert_init + +char *bfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long *res_length, char *is_null, char *error) { + char *str, *fn, *ofn; + int lrecl = (int)*(longlong*)args->args[2]; + PGLOBAL g = (PGLOBAL)initid->ptr; + + PlugSubSet(g->Sarea, g->Sarea_Size); + fn = MakePSZ(g, args, 0); + ofn = MakePSZ(g, args, 1); + + if (!g->Xchk) { + JUP* jup = new(g) JUP(g); + + str = jup->UnprettyJsonFile(g, fn, ofn, lrecl); + g->Xchk = str; + } else + str = (char*)g->Xchk; + + if (!str) { + PUSH_WARNING(g->Message ? g->Message : "Unexpected error"); + *is_null = 1; + *error = 1; + *res_length = 0; + } else { + strcpy(result, str); + *res_length = strlen(str); + } // endif str + + return str; +} // end of bfile_convert + +void bfile_convert_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bfile_convert_deinit + +/*********************************************************************************/ +/* Convert a pretty=0 Json file to binary BJSON. */ +/*********************************************************************************/ +my_bool bfile_bjson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen; + + if (args->arg_count != 2 && args->arg_count != 3) { + strcpy(message, "This function must have 2 or 3 arguments"); + return true; + } else if (args->arg_count == 3 && args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third Argument must be an integer (LRECL)"); + return true; + } else for (int i = 0; i < 2; i++) + if (args->arg_type[i] != STRING_RESULT) { + sprintf(message, "Arguments %d must be a string (file name)", i + 1); + return true; + } // endif args + + CalcLen(args, false, reslen, memlen); + memlen = memlen * M; + memlen += (args->arg_count == 3) ? 
(ulong)*(longlong*)args->args[2] : 1024;
+  return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bfile_bjson_init
+
+char *bfile_bjson(UDF_INIT *initid, UDF_ARGS *args, char *result,
+  unsigned long *res_length, char*, char *error) {
+  char   *buf, *str = NULL, fn[_MAX_PATH], ofn[_MAX_PATH];
+  bool    loop;
+  ssize_t len, newloc;
+  size_t  lrecl, binszp;
+  PBVAL   jsp;
+  PGLOBAL g = (PGLOBAL)initid->ptr;
+  BDOC    doc(g);
+
+  strcpy(fn, MakePSZ(g, args, 0));
+  strcpy(ofn, MakePSZ(g, args, 1));
+
+  if (args->arg_count == 3)
+    lrecl = (size_t)*(longlong*)args->args[2];
+  else
+    lrecl = 1024;
+
+  if (!g->Xchk) {
+    int   msgid = MSGID_OPEN_MODE_STRERROR;
+    FILE *fout = NULL;   // initialized so the cleanup below is always safe
+    FILE *fin = NULL;
+
+    if (!(fin = global_fopen(g, msgid, fn, "rt")))
+      str = strcpy(result, g->Message);
+    else if (!(fout = global_fopen(g, msgid, ofn, "wb")))
+      str = strcpy(result, g->Message);
+    else if ((buf = (char*)malloc(lrecl))) {
+      try {
+        do {
+          loop = false;
+          PlugSubSet(g->Sarea, g->Sarea_Size);
+
+          if (!fgets(buf, lrecl, fin)) {
+            if (!feof(fin)) {
+              sprintf(g->Message, "Error %d reading %zd bytes from %s",
+                      errno, lrecl, fn);
+              str = strcpy(result, g->Message);
+            } else
+              str = strcpy(result, ofn);
+
+          } else if ((len = strlen(buf))) {
+            if ((jsp = doc.ParseJson(g, buf, len))) {
+              newloc = (size_t)PlugSubAlloc(g, NULL, 0);
+              binszp = newloc - (size_t)jsp;
+
+              if (fwrite(&binszp, sizeof(binszp), 1, fout) != 1) {
+                sprintf(g->Message, "Error %d writing %zd bytes to %s",
+                        errno, sizeof(binszp), ofn);
+                str = strcpy(result, g->Message);
+              } else if (fwrite(jsp, binszp, 1, fout) != 1) {
+                sprintf(g->Message, "Error %d writing %zd bytes to %s",
+                        errno, binszp, ofn);
+                str = strcpy(result, g->Message);
+              } else
+                loop = true;
+
+            } else {
+              str = strcpy(result, g->Message);
+            } // endif jsp
+
+          } else
+            loop = true;
+
+        } while (loop);
+
+      } catch (int) {
+        str = strcpy(result, g->Message);
+      } catch (const char* msg) {
+        str = strcpy(result, msg);
+      } // end catch
+
+      free(buf);
+    } else
+      str = strcpy(result, "Buffer malloc failed");
+
+    if (fin) fclose(fin);
+    if (fout) fclose(fout);
+    g->Xchk = str;
+  } else
+    str = (char*)g->Xchk;
+
+  if (!str) {
+    if (g->Message)
+      str = strcpy(result, g->Message);
+    else
+      str = strcpy(result, "Unexpected error");
+
+  } // endif str
+
+  *res_length = strlen(str);
+  return str;
+} // end of bfile_bjson
+
+void bfile_bjson_deinit(UDF_INIT* initid) {
+  JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bfile_bjson_deinit
+
+/*********************************************************************************/
+/* Serialize a Json document. */
+/*********************************************************************************/
+my_bool bson_serialize_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+  unsigned long reslen, memlen;
+
+  if (args->args[0] && IsArgJson(args, 0) != 3) {
+    strcpy(message, "Argument must be a Jbin tree");
+    return true;
+  } else
+    CalcLen(args, false, reslen, memlen);
+
+  return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_serialize_init
+
+char *bson_serialize(UDF_INIT *initid, UDF_ARGS *args, char *result,
+                     unsigned long *res_length, char *, char *error)
+{
+  char   *str;
+  PGLOBAL g = (PGLOBAL)initid->ptr;
+
+  if (!g->Xchk) {
+    if (IsArgJson(args, 0) == 3) {
+      PBSON bsp = (PBSON)args->args[0];
+      BJNX  bnx(bsp->G);
+      PBVAL bvp = (args->arg_count == 1) ?
(PBVAL)bsp->Jsp : (PBVAL)bsp->Top; + +// if (!(str = bnx.Serialize(g, bvp, bsp->Filename, bsp->Pretty))) + if (!(str = bnx.Serialize(g, bvp, NULL, 0))) + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else { + // *error = 1; + str = strcpy(result, "Argument is not a Jbin tree"); + } // endif + + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_serialize + +void bson_serialize_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_serialize_deinit + +/*********************************************************************************/ +/* Make and return a binary Json array containing all the parameters. */ +/* Note: jvp must be set before arp because it can be a binary argument. */ +/*********************************************************************************/ +my_bool bbin_make_array_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, false, reslen, memlen); + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_make_array_init + +char *bbin_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false)) { + BJNX bnx(g); + PBVAL jvp = bnx.MakeValue(args, 0); + PBVAL arp = bnx.NewVal(TYPE_JAR); + + for (uint i = 0; i < args->arg_count;) { + bnx.AddArrayValue(arp, jvp); + jvp = bnx.MakeValue(args, ++i); + } // endfor i + + if ((bsp = BbinAlloc(bnx.G, initid->max_length, arp))) { + strcat(bsp->Msg, " array"); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? bsp : NULL; + } // endif bsp + + } // endif CheckMemory + + } else + bsp = (PBSON)g->Xchk; + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_make_array + +void bbin_make_array_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_make_array_deinit + +/*********************************************************************************/ +/* Add one value to a Json array. */ +/*********************************************************************************/ +my_bool bbin_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0;
+
+    // This is to avoid double execution when using prepared statements
+    if (IsArgJson(args, 0) > 1)
+      initid->const_item = 0;
+
+    return false;
+  } else
+    return true;
+
+} // end of bbin_array_add_init
+
+char *bbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
+                     unsigned long *res_length, char *is_null, char *error)
+{
+  PGLOBAL g = (PGLOBAL)initid->ptr;
+  PBSON   bsp = NULL;
+
+  if (g->Xchk) {
+    // This constant function was recalled
+    bsp = (PBSON)g->Xchk;
+    *res_length = sizeof(BSON);
+    return (char*)bsp;
+  } else if (!CheckMemory(g, initid, args, 2, false, false, true)) {
+    uint  n = 2;
+    int  *x = GetIntArgPtr(g, args, n);
+    BJNX  bnx(g, NULL, TYPE_STRING);
+    PBVAL jarp = NULL, top, jvp = NULL;  // jarp initialized: it is tested
+                                         // below even when CheckPath fails
+    PBVAL jsp = bnx.MakeValue(args, 0, true, &top);
+
+    if (bnx.CheckPath(g, args, jsp, jvp, 2))
+      PUSH_WARNING(g->Message);
+    else if (jvp && jvp->Type != TYPE_JAR) {
+      if ((jarp = bnx.NewVal(TYPE_JAR))) {
+        bnx.AddArrayValue(jarp, jvp);
+
+        if (!top)
+          top = jarp;
+
+      } // endif jarp
+
+    } else
+      jarp = jvp;
+
+    if (jarp) {
+      bnx.AddArrayValue(jarp, bnx.MakeValue(args, 1), x);
+      bnx.SetChanged(true);
+      bsp = bnx.MakeBinResult(args, top, initid->max_length);
+
+      if (initid->const_item)
+        // Keep result of constant function
+        g->Xchk = bsp;
+
+    } else
+      PUSH_WARNING(g->Message);
+
+  } // endif CheckMemory
+
+  if (!bsp) {
+    *res_length = 0;
+    *is_null = 1;
+    *error = 1;
+  } else
+    *res_length = sizeof(BSON);
+
+  return (char*)bsp;
+} // end of bbin_array_add
+
+void bbin_array_add_deinit(UDF_INIT* initid)
+{
+  JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_add_deinit
+
+/*********************************************************************************/
+/* Add one or several values to a Bson array. */
+/*********************************************************************************/
+my_bool bbin_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* message)
+{
+  return bson_array_add_values_init(initid, args, message);
+} // end of bbin_array_add_values_init
+
+char* bbin_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result,
+  unsigned long* res_length, char* is_null, char* error)
+{
+  PGLOBAL g = (PGLOBAL)initid->ptr;
+  PBSON   bsp = NULL;
+
+  if (!g->Xchk) {
+    if (!CheckMemory(g, initid, args, args->arg_count, true)) {
+      uint  i = 0;
+      BJNX  bnx(g);
+      PBVAL arp, top, jvp = NULL;
+      PBVAL bvp = bnx.MakeValue(args, 0, true, &top);
+
+      if (bvp->Type == TYPE_JAR) {
+        arp = bvp;
+        i = 1;
+      } else  // First argument is not an array
+        arp = bnx.NewVal(TYPE_JAR);
+
+      for (; i < args->arg_count; i++)
+        bnx.AddArrayValue(arp, bnx.MakeValue(args, i));
+
+      bnx.SetChanged(true);
+      bsp = bnx.MakeBinResult(args, top, initid->max_length);
+    } // endif CheckMemory
+
+    // Keep result of constant function
+    g->Xchk = (g->N) ? bsp : NULL;
+  } else
+    bsp = (PBSON)g->Xchk;
+
+  if (!bsp) {
+    *res_length = 0;
+    *is_null = 1;
+    *error = 1;
+  } else
+    *res_length = sizeof(BSON);
+
+  return (char*)bsp;
+} // end of bbin_array_add_values
+
+void bbin_array_add_values_deinit(UDF_INIT* initid) {
+  JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_add_values_deinit
+
+/*********************************************************************************/
+/* Make a Json array from values coming from rows.
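+
+   Aggregate function returning one binary (Jbin) array per group, truncated
+   to json_grp_size values; the result is normally passed on to another
+   bbin/bson function or to bson_serialize. Illustrative query (not from the
+   original sources; table and columns hypothetical):
+
+     SELECT dept, bson_serialize(bbin_array_grp(name)) FROM emp GROUP BY dept;
+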
*/ +/*********************************************************************************/ +my_bool bbin_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_array_grp_init(initid, args, message); +} // end of bbin_array_grp_init + +void bbin_array_grp_clear(UDF_INIT *initid, char *a, char *b) +{ + bson_array_grp_clear(initid, a, b); +} // end of bbin_array_grp_clear + +void bbin_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char *a, char *b) +{ + bson_array_grp_add(initid, args, a, b); +} // end of bbin_array_grp_add + +char *bbin_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBVAL arp = (PBVAL)g->Activityp; + + if (g->N < 0) + PUSH_WARNING("Result truncated to json_grp_size values"); + + if (arp) + if ((bsp = BbinAlloc(g, initid->max_length, arp))) + strcat(bsp->Msg, " array"); + + if (!bsp) { + *res_length = 0; + *is_null = 1; + *error = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_array_grp + +void bbin_array_grp_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_array_grp_deinit + +/*********************************************************************************/ +/* Make a Json object from values coming from rows. */ +/*********************************************************************************/ +my_bool bbin_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_object_grp_init(initid, args, message); +} // end of bbin_object_grp_init + +void bbin_object_grp_clear(UDF_INIT *initid, char *a, char *b) +{ + bson_object_grp_clear(initid, a, b); +} // end of bbin_object_grp_clear + +void bbin_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char *a, char *b) +{ + bson_object_grp_add(initid, args, a, b); +} // end of bbin_object_grp_add + +char *bbin_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBVAL bop = (PBVAL)g->Activityp; + + if (g->N < 0) + PUSH_WARNING("Result truncated to json_grp_size values"); + + if (bop) + if ((bsp = BbinAlloc(g, initid->max_length, bop))) + strcat(bsp->Msg, " object"); + + if (!bsp) { + *res_length = 0; + *is_null = 1; + *error = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_grp + +void bbin_object_grp_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_grp_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all the parameters. 
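+
+   Keys are derived from the arguments by MakeKey, so SQL aliases can name
+   the pairs. Illustrative query (not from the original sources;
+   alias-derived keys assumed):
+
+     SELECT bson_serialize(bbin_make_object(56 qty, 3.1416 price));
+     -- expected to return: {"qty":56,"price":3.1416}
+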
*/ +/*********************************************************************************/ +my_bool bbin_make_object_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bbin_make_object_init + +char *bbin_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, args->arg_count, true)) { + BJNX bnx(g); + PBVAL objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i)); + + if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) { + strcat(bsp->Msg, " object"); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? bsp : NULL; + } // endif bsp + + } // endif objp + + } // endif CheckMemory + + } // endif Xchk + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_make_object + +void bbin_make_object_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_make_object_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all not null parameters. */ +/*********************************************************************************/ +my_bool bbin_object_nonull_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_object_nonull_init + +char *bbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { + BJNX bnx(g); + PBVAL jvp, objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + if (!bnx.IsValueNull(jvp = bnx.MakeValue(args, i))) + bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i)); + + if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) { + strcat(bsp->Msg, " object"); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? bsp : NULL; + } // endif bsp + + } // endif objp + + } // endif CheckMemory + + } // endif Xchk + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_nonull + +void bbin_object_nonull_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_nonull_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all the key/value parameters. 
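+
+   Arguments alternate key, value, key, value, ... Illustrative query (not
+   from the original sources):
+
+     SELECT bson_serialize(bbin_object_key('qty', 56, 'price', 3.1416));
+     -- expected to return: {"qty":56,"price":3.1416}
+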
*/ +/*********************************************************************************/ +my_bool bbin_object_key_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count % 2) { + strcpy(message, "This function must have an even number of arguments"); + return true; + } // endif arg_count + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_object_key_init + +char *bbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { + BJNX bnx(g); + PBVAL objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i += 2) + bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i)); + + if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) { + strcat(bsp->Msg, " object"); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? bsp : NULL; + } // endif bsp + + } // endif objp + + } // endif CheckMemory + + } // endif Xchk + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_key + +void bbin_object_key_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_key_deinit + +/*********************************************************************************/ +/* Add or replace a value in a Json Object. */ +/*********************************************************************************/ +my_bool bbin_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else if (!IsArgJson(args, 0)) { + strcpy(message, "First argument must be a json item"); + return true; + } else + CalcLen(args, true, reslen, memlen, true); + + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_object_add_init + +char *bbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + *res_length = sizeof(BSON); + return (char*)bsp; + } else if (!CheckMemory(g, initid, args, 2, false, true, true)) { + PSZ key; + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL top; + PBVAL jobp = bnx.MakeValue(args, 0, true, &top); + PBVAL jvp = jobp; + + if (bnx.CheckPath(g, args, jvp, jobp, 2)) + PUSH_WARNING(g->Message); + else if (jobp && jobp->Type == TYPE_JOB) { + jvp = bnx.MakeValue(args, 1); + key = bnx.MakeKey(args, 1); + bnx.SetKeyValue(jobp, jvp, key); + bnx.SetChanged(true); + } else { + PUSH_WARNING("First argument target is not an object"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jobp + + // In case of error unchanged argument will be returned + bsp = bnx.MakeBinResult(args, top, initid->max_length); + + if (initid->const_item) + // Keep result of constant function + g->Xchk = bsp; + + } // endif CheckMemory + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_add + +void bbin_object_add_deinit(UDF_INIT* initid) +{ + 
JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_add_deinit + +/*********************************************************************************/ +/* Delete a value from a Json array. */ +/*********************************************************************************/ +my_bool bbin_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_array_delete_init(initid, args, message); +} // end of bbin_array_delete_init + +char *bbin_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + } else if (!CheckMemory(g, initid, args, 1, false, false, true)) { + int* x; + uint n = 1; + BJNX bnx(g); + PBVAL arp, top; + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); + + if (!(x = GetIntArgPtr(g, args, n))) + PUSH_WARNING("Missing or null array index"); + else if (bnx.CheckPath(g, args, jvp, arp, 1)) + PUSH_WARNING(g->Message); + else if (arp && arp->Type == TYPE_JAR) { + bnx.SetChanged(bnx.DeleteValue(arp, *x)); + bsp = bnx.MakeBinResult(args, top, initid->max_length); + } else { + PUSH_WARNING("First argument target is not an array"); + // if (g->Mrr) *error = 1; + } // endif jvp + + if (g->N) + // Keep result of constant function + g->Xchk = bsp; + + } // endif CheckMemory + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_array_delete + +void bbin_array_delete_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_array_delete_deinit + +/*********************************************************************************/ +/* Delete a value from a Json object. 
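+
+   The second argument is the key to remove; an optional following argument
+   can give a path to the target sub-object. Illustrative call (not from
+   the original sources):
+
+     SELECT bson_serialize(bbin_object_delete('{"a":1,"b":2}', 'b'));
+     -- expected to return: {"a":1}
+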
*/ +/*********************************************************************************/ +my_bool bbin_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have 2 or 3 arguments"); + return true; + } else if (!IsArgJson(args, 0)) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument must be a key string"); + return true; + } else + CalcLen(args, true, reslen, memlen, true); + + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_object_delete_init + +char *bbin_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + *res_length = sizeof(BSON); + return (char*)bsp; + } else if (!CheckMemory(g, initid, args, 1, false, true, true)) { + PCSZ key; + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL top; + PBVAL jobp = bnx.MakeValue(args, 0, true, &top); + + if (bnx.CheckPath(g, args, top, jobp, 2)) + PUSH_WARNING(g->Message); + else if (jobp && jobp->Type == TYPE_JOB) { + key = bnx.MakeKey(args, 1); + bnx.SetChanged(bnx.DeleteKey(jobp, key)); + } else { + PUSH_WARNING("First argument target is not an object"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jvp + + // In case of error unchanged argument will be returned + bsp = bnx.MakeBinResult(args, top, initid->max_length); + + if (initid->const_item) + // Keep result of constant function + g->Xchk = bsp; + + } // endif CheckMemory + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_delete + +void bbin_object_delete_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_delete_deinit + +/*********************************************************************************/ +/* Returns an array of the Json object keys. */ +/*********************************************************************************/ +my_bool bbin_object_list_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_object_list_init(initid, args, message); +} // end of bbin_object_list_init + +char *bbin_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, 1, true, true)) { + BJNX bnx(g); + PBVAL top, jarp = NULL; + PBVAL jsp = bnx.MakeValue(args, 0, true, &top); + + if (jsp->Type == TYPE_JOB) { + jarp = bnx.GetKeyList(jsp); + } else { + PUSH_WARNING("First argument is not an object"); + if (g->Mrr) *error = 1; + } // endif jsp type + + // In case of error unchanged argument will be returned + bsp = bnx.MakeBinResult(args, top, initid->max_length); + bsp->Jsp = (PJSON)jarp; + + } // endif CheckMemory + + // Keep result of constant function + g->Xchk = (initid->const_item) ? 
bsp : NULL; + } // endif bsp + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_list + +void bbin_object_list_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_list_deinit + +/*********************************************************************************/ +/* Returns an array of the Json object values. */ +/*********************************************************************************/ +my_bool bbin_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_object_values_init(initid, args, message); +} // end of bbin_object_values_init + +char *bbin_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, 1, true, true)) { + BJNX bnx(g); + PBVAL top, jarp; + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); + + if (jvp->Type == TYPE_JOB) { + jarp = bnx.GetObjectValList(jvp); + } else { + PUSH_WARNING("First argument is not an object"); + if (g->Mrr) *error = 1; + } // endif jvp + + // In case of error unchanged argument will be returned + bsp = bnx.MakeBinResult(args, top, initid->max_length); + bsp->Jsp = (PJSON)jarp; + + } // endif CheckMemory + + if (initid->const_item) { + // Keep result of constant function + g->Xchk = bsp; + } // endif const_item + + } // endif bsp + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_values + +void bbin_object_values_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_values_deinit + +/*********************************************************************************/ +/* Get a Json item from a Json document. */ +/*********************************************************************************/ +my_bool bbin_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_get_item_init(initid, args, message); +} // end of bbin_get_item_init + +char *bbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + bsp = (PBSON)g->Xchk; + } else if (!CheckMemory(g, initid, args, 1, true, true)) { + char *path = MakePSZ(g, args, 1); + BJNX bnx(g, NULL, TYPE_STRING, initid->max_length); + PBVAL top, jvp = NULL; + PBVAL jsp = bnx.MakeValue(args, 0, true, &top); + + if (bnx.CheckPath(g, args, jsp, jvp, 1)) + PUSH_WARNING(g->Message); + else if (jvp) { + bsp = bnx.MakeBinResult(args, top, initid->max_length); + bsp->Jsp = (PJSON)jvp; + + if (initid->const_item) + // Keep result of constant function + g->Xchk = bsp; + + } // endif jvp + + } else + PUSH_WARNING("CheckMemory error"); + + if (!bsp) { + *is_null = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_get_item + +void bbin_get_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_get_item_deinit + +/*********************************************************************************/ +/* Merge two arrays or objects. 
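+
+   Both arguments must have the same type, two arrays or two objects.
+   Illustrative call (not from the original sources):
+
+     SELECT bson_serialize(bbin_item_merge('[1,2]', '[3]'));
+     -- expected to return: [1,2,3]
+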
*/ +/*********************************************************************************/ +my_bool bbin_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_item_merge_init(initid, args, message); +} // end of bbin_item_merge_init + +char *bbin_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 2, false, false, true)) { + JTYP type; + BJNX bnx(g); + PBVAL jvp, top = NULL; + PBVAL jsp[2] = {NULL, NULL}; + + for (int i = 0; i < 2; i++) { + if (i) { + jvp = bnx.MakeValue(args, i, true); + + if (jvp->Type != type) { + PUSH_WARNING("Argument types mismatch"); + goto fin; + } // endif type + + } else { + jvp = bnx.MakeValue(args, i, true, &top); + type = (JTYP)jvp->Type; + + if (type != TYPE_JAR && type != TYPE_JOB) { + PUSH_WARNING("First argument is not an array or object"); + goto fin; + } // endif type + + } // endif i + + jsp[i] = jvp; + } // endfor i + + if (type == TYPE_JAR) + bnx.MergeArray(jsp[0], jsp[1]); + else + bnx.MergeObject(jsp[0], jsp[1]); + + bnx.SetChanged(true); + bsp = bnx.MakeBinResult(args, top, initid->max_length); + } // endif CheckMemory + + if (g->N) + // Keep result of constant function + g->Xchk = bsp; + +fin: + if (!bsp) { + *res_length = 0; + *error = 1; + *is_null = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_item_merge + +void bbin_item_merge_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_item_merge_deinit + +/*********************************************************************************/ +/* This function is used by the jbin_set/insert/update_item functions. */ +/*********************************************************************************/ +static char *bbin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *path; + int w; + my_bool b = true; + PBJNX bxp; + PBVAL jsp, jvp, top; + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Alchecked) { + bsp = (PBSON)g->Activityp; + goto fin; + } else if (g->N) + g->Alchecked = 1; + + if (!strcmp(result, "$set")) + w = 0; + else if (!strcmp(result, "$insert")) + w = 1; + else if (!strcmp(result, "$update")) + w = 2; + else { + PUSH_WARNING("Logical error, please contact CONNECT developer"); + goto fin; + } // endelse + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true, false, true)) { + throw 1; + } else { + BJNX bnx(g); + + jsp = bnx.MakeValue(args, 0, true, &top); + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + g->More = (size_t)top; + JsonMemSave(g); + } // endif Mrr + + } // endif CheckMemory + + } else { + jsp = (PBVAL)g->Xchk; + top = (PBVAL)g->More; + } // endif Xchk + + bxp = new(g)BJNX(g, jsp, TYPE_STRING, initid->max_length, 0, true); + + for (uint i = 1; i + 1 < args->arg_count; i += 2) { + jvp = bxp->MakeValue(args, i); + path = MakePSZ(g, args, i + 1); + + if (bxp->SetJpath(g, path, false)) + throw 2; + + if (w) { + bxp->ReadValue(g); + b = bxp->GetValue()->IsNull(); + b = (w == 1) ? 
b : !b; + } // endif w + + if (b && bxp->WriteValue(g, jvp)) + throw 3; + + bxp->SetChanged(true); + } // endfor i + + if (!(bsp = bxp->MakeBinResult(args, top, initid->max_length))) + throw 4; + + if (g->N) + // Keep result of constant function + g->Activityp = (PACTIVITY)bsp; + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + + PUSH_WARNING(g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + } // end catch + +fin: + if (!bsp) { + *is_null = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_handle_item + +/*********************************************************************************/ +/* Set Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bbin_set_item_init + +char *bbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$set"); + return bbin_handle_item(initid, args, result, res_length, is_null, p); +} // end of bbin_set_item + +void bbin_set_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_set_item_deinit + +/*********************************************************************************/ +/* Insert Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bbin_insert_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bbin_insert_item_init + +char *bbin_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$insert"); + return bbin_handle_item(initid, args, result, res_length, is_null, p); +} // end of bbin_insert_item + +void bbin_insert_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_insert_item_deinit + +/*********************************************************************************/ +/* Update Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bbin_update_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bbin_update_item_init + +char *bbin_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$update"); + return bbin_handle_item(initid, args, result, res_length, is_null, p); +} // end of bbin_update_item + +void bbin_update_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_update_item_deinit + +/*********************************************************************************/ +/* Delete items from a Json document. 
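+
+   Binary counterpart of bson_delete_item above; called with a single Jbin
+   argument it consumes the path array produced by bbin_locate_all, e.g.
+   (illustrative, not from the original sources):
+
+     SELECT bson_serialize(bbin_delete_item(bbin_locate_all('[1,2,1]', 1)));
+     -- expected to delete every occurrence of the value 1
+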
*/ +/*********************************************************************************/ +my_bool bbin_delete_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_delete_item_init(initid, args, message); +} // end of bbin_delete_item_init + +char *bbin_delete_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *path; + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 1, false, false, true)) { + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL top, jar = NULL; + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); + + if (args->arg_count == 1) { + // This should be coming from bbin_locate_all + jar = jvp; // This is the array of paths + jvp = top; // And this is the document + } else if(!bnx.IsJson(jvp)) { + PUSH_WARNING("First argument is not a JSON document"); + goto fin; + } else if (args->arg_count == 2) { + // Check whether this is an array of paths + jar = bnx.MakeValue(args, 1, true); + + if (jar && jar->Type != TYPE_JAR) + jar = NULL; + + } // endif arg_count + + if (jar) { + // Do the deletion in reverse order + for(int i = bnx.GetArraySize(jar) - 1; i >= 0; i--) { + path = bnx.GetString(bnx.GetArrayValue(jar, i)); + + if (bnx.SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + bnx.SetChanged(bnx.DeleteItem(g, jvp)); + } // endfor i + + } else for (uint i = 1; i < args->arg_count; i++) { + path = MakePSZ(g, args, i); + + if (bnx.SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + bnx.SetChanged(bnx.DeleteItem(g, jvp)); + } // endfor i + + bsp = bnx.MakeBinResult(args, top, initid->max_length); + + if (args->arg_count == 1) + // Here Jsp was not a sub-item of top + bsp->Jsp = (PJSON)top; + + } // endif CheckMemory + + if (g->N) + // Keep result of constant function + g->Xchk = bsp; + +fin: + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_delete_item + +void bbin_delete_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_delete_item_deinit + +/*********************************************************************************/ +/* Returns a json file as a json binary tree. 
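+
+   The file is parsed once into a binary tree that other bbin/bson functions
+   can reuse without re-parsing. Illustrative call (not from the original
+   sources; file name hypothetical):
+
+     SELECT bson_serialize(bbin_file('/tmp/books.json'));
+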
*/ +/*********************************************************************************/ +my_bool bbin_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_file_init(initid, args, message); +} // end of bbin_file_init + +char *bbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *fn; + int pretty = 3; + size_t len = 0; + PBVAL jsp, jvp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + BJNX bnx(g); + PBSON bsp = (PBSON)g->Xchk; + + if (bsp) + goto fin; + + fn = MakePSZ(g, args, 0); + + for (unsigned int i = 1; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) { + pretty = (int) * (longlong*)args->args[i]; + break; + } // endif type + + // Parse the json file and allocate its tree structure + if (!(jsp = bnx.ParseJsonFile(g, fn, pretty, len))) { + PUSH_WARNING(g->Message); + *error = 1; + goto fin; + } // endif jsp + +// if (pretty == 3) +// PUSH_WARNING("File pretty format cannot be determined"); +// else if (pretty == 3) +// pretty = pty; + + if ((bsp = BbinAlloc(bnx.G, len, jsp))) { + strcat(bsp->Msg, " file"); + bsp->Filename = fn; + bsp->Pretty = pretty; + } else { + *error = 1; + goto fin; + } // endif bsp + + // Check whether a path was specified + if (bnx.CheckPath(g, args, jsp, jvp, 1)) { + PUSH_WARNING(g->Message); + bsp = NULL; + goto fin; + } else if (jvp) + bsp->Jsp = (PJSON)jvp; + + if (initid->const_item) + // Keep result of constant function + g->Xchk = bsp; + +fin: + if (!bsp) { + *res_length = 0; + *is_null = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_file + +void bbin_file_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_file_deinit + +/*********************************************************************************/ +/* Locate all occurences of a value in a Json tree. 
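+
+   Returns, as a binary item, the array of the paths where the value occurs;
+   an optional third argument limits the number of matches (10 by default).
+   Illustrative call (not from the original sources; path format assumed):
+
+     SELECT bson_serialize(bbin_locate_all('[1,2,1]', 1));
+     -- expected to return something like: ["$[0]","$[2]"]
+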
*/ +/*********************************************************************************/ +my_bool bbin_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + return bson_locate_all_init(initid, args, message); +} // end of bbin_locate_all_init + +char* bbin_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) { + char *path = NULL; + int mx = 10; + PBVAL bvp, bvp2; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (g->N) { + if (g->Activityp) { + bsp = (PBSON)g->Activityp; + *res_length = sizeof(BSON); + return (char*)bsp; + } else { + *error = 1; + *res_length = 0; + *is_null = 1; + return NULL; + } // endif Activityp + + } else if (initid->const_item) + g->N = 1; + + try { + PBVAL top = NULL; + BJNX bnx(g); + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + bnx.Reset(); + + bvp = bnx.MakeValue(args, 0, true, &top); + + if (bvp->Type == TYPE_NULL) { + PUSH_WARNING("First argument is not a valid JSON item"); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + g->More = (size_t)top; + JsonMemSave(g); + } // endif Mrr + + } else { + bvp = (PBVAL)g->Xchk; + top = (PBVAL)g->More; + } // endif Xchk + + // The item to locate + bvp2 = bnx.MakeValue(args, 1, true); + + if (bvp2->Type == TYPE_NULL) { + PUSH_WARNING("Invalid second argument"); + goto err; + } // endif bvp2 + + if (args->arg_count > 2) + mx = (int)*(long long*)args->args[2]; + + if ((path = bnx.LocateAll(g, bvp, bvp2, mx))) { + bsp = bnx.MakeBinResult(args, top, initid->max_length); + bsp->Jsp = (PJSON)bnx.ParseJson(g, path, strlen(path)); + } // endif path + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)bsp; + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch + +err: + if (!bsp) { + *res_length = 0; + *is_null = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_locate_all + +void bbin_locate_all_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_locate_all_deinit + + diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h new file mode 100644 index 00000000000..bbfd1ceed80 --- /dev/null +++ b/storage/connect/bsonudf.h @@ -0,0 +1,411 @@ +/******************** tabjson H Declares Source Code File (.H) *******************/ +/* Name: bsonudf.h Version 1.0 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */ +/* */ +/* This file contains the BSON UDF function and class declares. */ +/*********************************************************************************/ +#pragma once +#include "jsonudf.h" +#include "bson.h" + +#if 0 +#define UDF_EXEC_ARGS \ + UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char* + +// BSON size should be equal on Linux and Windows +#define BMX 255 +typedef struct BSON* PBSON; + +/***********************************************************************/ +/* Structure used to return binary json to Json UDF functions. 
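+   (This declaration, like the rest of the enclosing #if 0 block, duplicates
+   jsonudf.h and is compiled out; apparently kept for reference only.)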
*/ +/***********************************************************************/ +struct BSON { + char Msg[BMX + 1]; + char *Filename; + PGLOBAL G; + int Pretty; + ulong Reslen; + my_bool Changed; + PJSON Top; + PJSON Jsp; + PBSON Bsp; +}; // end of struct BSON + +PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); + +/*********************************************************************************/ +/* The JSON tree node. Can be an Object or an Array. */ +/*********************************************************************************/ +typedef struct _jnode { + PSZ Key; // The key used for object + OPVAL Op; // Operator used for this node + PVAL CncVal; // To cont value used for OP_CNC + PVAL Valp; // The internal array VALUE + int Rank; // The rank in array + int Rx; // Read row number + int Nx; // Next to read row number +} JNODE, *PJNODE; + +/*********************************************************************************/ +/* The JSON utility functions. */ +/*********************************************************************************/ +bool IsNum(PSZ s); +char *NextChr(PSZ s, char sep); +char *GetJsonNull(void); +uint GetJsonGrpSize(void); +my_bool JsonSubSet(PGLOBAL g, my_bool b = false); +my_bool CalcLen(UDF_ARGS* args, my_bool obj, unsigned long& reslen, + unsigned long& memlen, my_bool mod = false); +my_bool JsonInit(UDF_INIT* initid, UDF_ARGS* args, char* message, my_bool mbn, + unsigned long reslen, unsigned long memlen, + unsigned long more = 0); +my_bool CheckMemory(PGLOBAL g, UDF_INIT* initid, UDF_ARGS* args, uint n, + my_bool m, my_bool obj = false, my_bool mod = false); +PSZ MakePSZ(PGLOBAL g, UDF_ARGS* args, int i); +int IsArgJson(UDF_ARGS* args, uint i); +char *GetJsonFile(PGLOBAL g, char* fn); + +/*********************************************************************************/ +/* Structure JPN. Used to make the locate path. */ +/*********************************************************************************/ +typedef struct _jpn { + int Type; + PCSZ Key; + int N; +} JPN, *PJPN; + +#endif // 0 + +/* --------------------------- New Testing BJSON Stuff --------------------------*/ +extern uint JsonGrpSize; +uint GetJsonGroupSize(void); + + +typedef class BJNX* PBJNX; + +/*********************************************************************************/ +/* Class BJNX: BJSON access methods. 
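+
+   Typical usage, as in the UDF implementations above (sketch only; compare
+   bsonget_real):
+
+     BJNX  bnx(g);                        // bind to the plugin work area
+     PBVAL top, jvp = NULL;
+     PBVAL jsp = bnx.MakeValue(args, 0, true, &top);
+     char *path = MakePSZ(g, args, 1);
+     if (!bnx.SetJpath(g, path))          // SetJpath returns true on error
+       jvp = bnx.GetRowValue(g, jsp, 0);  // fetch the item at the path
+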
*/ +/*********************************************************************************/ +class BJNX : public BDOC { +public: + // Constructors + BJNX(PGLOBAL g); + BJNX(PGLOBAL g, PBVAL row, int type, int len = 64, int prec = 0, my_bool wr = false); + + // Implementation + int GetPrecision(void) { return Prec; } + PVAL GetValue(void) { return Value; } + void SetRow(PBVAL vp) { Row = vp; } + void SetChanged(my_bool b) { Changed = b; } + + // Methods + my_bool SetJpath(PGLOBAL g, char* path, my_bool jb = false); + my_bool ParseJpath(PGLOBAL g); + void ReadValue(PGLOBAL g); + PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i); + PBVAL GetJson(PGLOBAL g); + my_bool CheckPath(PGLOBAL g); + my_bool CheckPath(PGLOBAL g, UDF_ARGS* args, PBVAL jsp, PBVAL& jvp, int n); + my_bool WriteValue(PGLOBAL g, PBVAL jvalp); + my_bool DeleteItem(PGLOBAL g, PBVAL vlp); + char *Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k = 1); + char *LocateAll(PGLOBAL g, PBVAL jsp, PBVAL jvp, int mx = 10); + PSZ MakeKey(UDF_ARGS* args, int i); + PBVAL MakeValue(UDF_ARGS* args, uint i, bool b = false, PBVAL* top = NULL); + PBVAL MakeTypedValue(PGLOBAL g, UDF_ARGS* args, uint i, + JTYP type, PBVAL* top = NULL); + PBVAL ParseJsonFile(PGLOBAL g, char* fn, int& pty, size_t& len); + char *MakeResult(UDF_ARGS* args, PBVAL top, uint n = 2); + PBSON MakeBinResult(UDF_ARGS* args, PBVAL top, ulong len, int n = 2); + +protected: + my_bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); + PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); + PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); + PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); + PVAL GetCalcValue(PGLOBAL g, PBVAL bap, int n); + PBVAL MakeJson(PGLOBAL g, PBVAL bvp, int n); + void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp); + PBVAL GetRow(PGLOBAL g); + PBVAL MoveVal(PBVAL vlp); + PBVAL MoveJson(PBJNX bxp, PBVAL jvp); + PBVAL MoveArray(PBJNX bxp, PBVAL jvp); + PBVAL MoveObject(PBJNX bxp, PBVAL jvp); + PBVAL MoveValue(PBJNX bxp, PBVAL jvp); + my_bool CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2); + my_bool LocateArray(PGLOBAL g, PBVAL jarp); + my_bool LocateObject(PGLOBAL g, PBVAL jobp); + my_bool LocateValue(PGLOBAL g, PBVAL jvp); + my_bool LocateArrayAll(PGLOBAL g, PBVAL jarp); + my_bool LocateObjectAll(PGLOBAL g, PBVAL jobp); + my_bool LocateValueAll(PGLOBAL g, PBVAL jvp); + my_bool CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2); + my_bool AddPath(void); + + // Default constructor not to be used + BJNX(void) {} + + // Members + PBVAL Row; + PBVAL Bvalp; + PJPN Jpnp; + JOUTSTR *Jp; + JNODE *Nodes; // The intermediate objects + PVAL Value; + PVAL MulVal; // To value used by multiple column + char *Jpath; // The json path + int Buf_Type; + int Long; + int Prec; + int Nod; // The number of intermediate objects + int Xnod; // Index of multiple values + int K; // Kth item to locate + int I; // Index of JPN + int Imax; // Max number of JPN's + int B; // Index base + my_bool Xpd; // True for expandable column + my_bool Parsed; // True when parsed + my_bool Found; // Item found by locate + my_bool Wr; // Write mode + my_bool Jb; // Must return json item + my_bool Changed; // True when contains was modified +}; // end of class BJNX + +extern "C" { + DllExport my_bool bson_test_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_test(UDF_EXEC_ARGS); + DllExport void bson_test_deinit(UDF_INIT*); + + DllExport my_bool bsonvalue_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bsonvalue(UDF_EXEC_ARGS); + DllExport void bsonvalue_deinit(UDF_INIT*); + + DllExport my_bool 
bson_make_array_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_make_array(UDF_EXEC_ARGS); + DllExport void bson_make_array_deinit(UDF_INIT*); + + DllExport my_bool bson_array_add_values_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_array_add_values(UDF_EXEC_ARGS); + DllExport void bson_array_add_values_deinit(UDF_INIT*); + + DllExport my_bool bson_array_add_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_array_add(UDF_EXEC_ARGS); + DllExport void bson_array_add_deinit(UDF_INIT*); + + DllExport my_bool bson_array_delete_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_array_delete(UDF_EXEC_ARGS); + DllExport void bson_array_delete_deinit(UDF_INIT*); + + DllExport my_bool bsonlocate_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bsonlocate(UDF_EXEC_ARGS); + DllExport void bsonlocate_deinit(UDF_INIT*); + + DllExport my_bool bson_locate_all_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_locate_all(UDF_EXEC_ARGS); + DllExport void bson_locate_all_deinit(UDF_INIT*); + + DllExport my_bool bson_contains_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bson_contains(UDF_INIT*, UDF_ARGS*, char*, char*); + DllExport void bson_contains_deinit(UDF_INIT*); + + DllExport my_bool bsoncontains_path_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsoncontains_path(UDF_INIT*, UDF_ARGS*, char*, char*); + DllExport void bsoncontains_path_deinit(UDF_INIT*); + + DllExport my_bool bson_make_object_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_make_object(UDF_EXEC_ARGS); + DllExport void bson_make_object_deinit(UDF_INIT*); + + DllExport my_bool bson_object_nonull_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_nonull(UDF_EXEC_ARGS); + DllExport void bson_object_nonull_deinit(UDF_INIT*); + + DllExport my_bool bson_object_key_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_key(UDF_EXEC_ARGS); + DllExport void bson_object_key_deinit(UDF_INIT*); + + DllExport my_bool bson_object_add_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_add(UDF_EXEC_ARGS); + DllExport void bson_object_add_deinit(UDF_INIT*); + + DllExport my_bool bson_object_delete_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_delete(UDF_EXEC_ARGS); + DllExport void bson_object_delete_deinit(UDF_INIT*); + + DllExport my_bool bson_object_list_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_list(UDF_EXEC_ARGS); + DllExport void bson_object_list_deinit(UDF_INIT*); + + DllExport my_bool bson_object_values_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_values(UDF_EXEC_ARGS); + DllExport void bson_object_values_deinit(UDF_INIT*); + + DllExport my_bool bson_item_merge_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_item_merge(UDF_EXEC_ARGS); + DllExport void bson_item_merge_deinit(UDF_INIT*); + + DllExport my_bool bson_get_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_get_item(UDF_EXEC_ARGS); + DllExport void bson_get_item_deinit(UDF_INIT*); + + DllExport my_bool bsonget_string_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bsonget_string(UDF_EXEC_ARGS); + DllExport void bsonget_string_deinit(UDF_INIT*); + + DllExport my_bool bsonget_int_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonget_int(UDF_INIT*, UDF_ARGS*, char*, char*); + DllExport void bsonget_int_deinit(UDF_INIT*); + + DllExport my_bool bsonget_real_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport double bsonget_real(UDF_INIT*, UDF_ARGS*, char*, char*); + DllExport void 
bsonget_real_deinit(UDF_INIT*); + + DllExport my_bool bsonset_def_prec_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonset_def_prec(UDF_INIT*, UDF_ARGS*, char*, char*); + + DllExport my_bool bsonget_def_prec_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonget_def_prec(UDF_INIT*, UDF_ARGS*, char*, char*); + + DllExport my_bool bsonset_grp_size_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonset_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*); + + DllExport my_bool bsonget_grp_size_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonget_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*); + + DllExport my_bool bson_array_grp_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport void bson_array_grp_clear(UDF_INIT *, char *, char *); + DllExport void bson_array_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *); + DllExport char *bson_array_grp(UDF_EXEC_ARGS); + DllExport void bson_array_grp_deinit(UDF_INIT*); + + DllExport my_bool bson_object_grp_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport void bson_object_grp_clear(UDF_INIT *, char *, char *); + DllExport void bson_object_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *); + DllExport char *bson_object_grp(UDF_EXEC_ARGS); + DllExport void bson_object_grp_deinit(UDF_INIT*); + + DllExport my_bool bson_delete_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_delete_item(UDF_EXEC_ARGS); + DllExport void bson_delete_item_deinit(UDF_INIT*); + + DllExport my_bool bson_set_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_set_item(UDF_EXEC_ARGS); + DllExport void bson_set_item_deinit(UDF_INIT*); + + DllExport my_bool bson_insert_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_insert_item(UDF_EXEC_ARGS); + DllExport void bson_insert_item_deinit(UDF_INIT*); + + DllExport my_bool bson_update_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_update_item(UDF_EXEC_ARGS); + DllExport void bson_update_item_deinit(UDF_INIT*); + + DllExport my_bool bson_file_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_file(UDF_EXEC_ARGS); + DllExport void bson_file_deinit(UDF_INIT*); + + DllExport my_bool bfile_make_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bfile_make(UDF_EXEC_ARGS); + DllExport void bfile_make_deinit(UDF_INIT*); + + DllExport my_bool bfile_convert_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bfile_convert(UDF_EXEC_ARGS); + DllExport void bfile_convert_deinit(UDF_INIT*); + + DllExport my_bool bfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bfile_bjson(UDF_EXEC_ARGS); + DllExport void bfile_bjson_deinit(UDF_INIT*); + + DllExport my_bool bson_serialize_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_serialize(UDF_EXEC_ARGS); + DllExport void bson_serialize_deinit(UDF_INIT*); + + DllExport my_bool bbin_make_array_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_make_array(UDF_EXEC_ARGS); + DllExport void bbin_make_array_deinit(UDF_INIT*); + + DllExport my_bool bbin_array_add_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_array_add(UDF_EXEC_ARGS); + DllExport void bbin_array_add_deinit(UDF_INIT*); + + DllExport my_bool bbin_array_add_values_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_array_add_values(UDF_EXEC_ARGS); + DllExport void bbin_array_add_values_deinit(UDF_INIT*); + + DllExport my_bool bbin_array_delete_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_array_delete(UDF_EXEC_ARGS); + DllExport void bbin_array_delete_deinit(UDF_INIT*); + + DllExport my_bool 
bbin_array_grp_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport void bbin_array_grp_clear(UDF_INIT *, char *, char *); + DllExport void bbin_array_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *); + DllExport char *bbin_array_grp(UDF_EXEC_ARGS); + DllExport void bbin_array_grp_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_grp_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport void bbin_object_grp_clear(UDF_INIT *, char *, char *); + DllExport void bbin_object_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *); + DllExport char *bbin_object_grp(UDF_EXEC_ARGS); + DllExport void bbin_object_grp_deinit(UDF_INIT*); + + DllExport my_bool bbin_make_object_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_make_object(UDF_EXEC_ARGS); + DllExport void bbin_make_object_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_nonull_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_object_nonull(UDF_EXEC_ARGS); + DllExport void bbin_object_nonull_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_key_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_object_key(UDF_EXEC_ARGS); + DllExport void bbin_object_key_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_add_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_object_add(UDF_EXEC_ARGS); + DllExport void bbin_object_add_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_delete_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_object_delete(UDF_EXEC_ARGS); + DllExport void bbin_object_delete_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_list_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_object_list(UDF_EXEC_ARGS); + DllExport void bbin_object_list_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_values_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_object_values(UDF_EXEC_ARGS); + DllExport void bbin_object_values_deinit(UDF_INIT*); + + DllExport my_bool bbin_get_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_get_item(UDF_EXEC_ARGS); + DllExport void bbin_get_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_item_merge_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_item_merge(UDF_EXEC_ARGS); + DllExport void bbin_item_merge_deinit(UDF_INIT*); + + DllExport my_bool bbin_set_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_set_item(UDF_EXEC_ARGS); + DllExport void bbin_set_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_insert_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_insert_item(UDF_EXEC_ARGS); + DllExport void bbin_insert_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_update_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_update_item(UDF_EXEC_ARGS); + DllExport void bbin_update_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_delete_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_delete_item(UDF_EXEC_ARGS); + DllExport void bbin_delete_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_locate_all_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_locate_all(UDF_EXEC_ARGS); + DllExport void bbin_locate_all_deinit(UDF_INIT*); + + DllExport my_bool bbin_file_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_file(UDF_EXEC_ARGS); + DllExport void bbin_file_deinit(UDF_INIT*); +} // extern "C" diff --git a/storage/connect/cmgfam.cpp b/storage/connect/cmgfam.cpp index 579b5b919a7..690c087c2bb 100644 --- a/storage/connect/cmgfam.cpp +++ b/storage/connect/cmgfam.cpp @@ -1,11 +1,11 @@ /************** CMGFAM C++ Program Source Code File (.CPP) *************/ /* PROGRAM NAME: 
cmgfam.cpp */
 /* ------------- */
-/* Version 1.4 */
+/* Version 1.5 */
 /* */
 /* COPYRIGHT: */
 /* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 20017 */
+/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */
 /* */
 /* WHAT THIS PROGRAM DOES: */
 /* ----------------------- */
@@ -29,7 +29,11 @@
 #include "reldef.h"
 #include "filamtxt.h"
 #include "tabdos.h"
+#if defined(BSON_SUPPORT)
+#include "tabbson.h"
+#else
 #include "tabjson.h"
+#endif // BSON_SUPPORT
 #include "cmgfam.h"
 
 #if defined(UNIX) || defined(UNIV_LINUX)
@@ -53,6 +57,7 @@ CMGFAM::CMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL)
     Pcg.Options = tdp->Options;
     Pcg.Filter = tdp->Filter;
     Pcg.Pipe = tdp->Pipe && tdp->Options != NULL;
+    Lrecl = tdp->Lrecl + tdp->Ending;
   } else {
     Pcg.Uristr = NULL;
     Pcg.Db_name = NULL;
@@ -60,21 +65,55 @@ CMGFAM::CMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL)
     Pcg.Options = NULL;
     Pcg.Filter = NULL;
     Pcg.Pipe = false;
+    Lrecl = 0;
   } // endif tdp
 
   To_Fbt = NULL;
   Mode = MODE_ANY;
   Done = false;
-  Lrecl = tdp->Lrecl + tdp->Ending;
 } // end of CMGFAM standard constructor
 
- CMGFAM::CMGFAM(PCMGFAM tdfp) : DOSFAM(tdfp)
+#if defined(BSON_SUPPORT)
+/***********************************************************************/
+/* Constructors. */
+/***********************************************************************/
+CMGFAM::CMGFAM(PBDEF tdp) : DOSFAM((PDOSDEF)NULL)
+{
+  Cmgp = NULL;
+  Pcg.Tdbp = NULL;
+
+  if (tdp) {
+    Pcg.Uristr = tdp->Uri;
+    Pcg.Db_name = tdp->Schema;
+    Pcg.Coll_name = tdp->Collname;
+    Pcg.Options = tdp->Options;
+    Pcg.Filter = tdp->Filter;
+    Pcg.Pipe = tdp->Pipe && tdp->Options != NULL;
+    Lrecl = tdp->Lrecl + tdp->Ending;
+  } else {
+    Pcg.Uristr = NULL;
+    Pcg.Db_name = NULL;
+    Pcg.Coll_name = NULL;
+    Pcg.Options = NULL;
+    Pcg.Filter = NULL;
+    Pcg.Pipe = false;
+    Lrecl = 0;
+  } // endif tdp
+
+  To_Fbt = NULL;
+  Mode = MODE_ANY;
+  Done = false;
+} // end of CMGFAM standard constructor
+#endif // BSON_SUPPORT
+
+CMGFAM::CMGFAM(PCMGFAM tdfp) : DOSFAM(tdfp)
 {
+  Cmgp = tdfp->Cmgp;
   Pcg = tdfp->Pcg;
   To_Fbt = tdfp->To_Fbt;
   Mode = tdfp->Mode;
   Done = tdfp->Done;
- } // end of CMGFAM copy constructor
+} // end of CMGFAM copy constructor
 
 /***********************************************************************/
 /* Reset: reset position values at the beginning of file. */
diff --git a/storage/connect/cmgfam.h b/storage/connect/cmgfam.h
index 7571f5c5309..9c5f91f0d23 100644
--- a/storage/connect/cmgfam.h
+++ b/storage/connect/cmgfam.h
@@ -1,7 +1,7 @@
 /*************** CMGFam H Declares Source Code File (.H) ***************/
-/* Name: cmgfam.h Version 1.5 */
+/* Name: cmgfam.h Version 1.6 */
 /* */
-/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */
 /* */
 /* This file contains the MongoDB access method classes declares.
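+   (Added note: with BSON_SUPPORT defined, CMGFAM now has two definition
+   constructors, CMGFAM(PJDEF) for JSON table definitions and
+   CMGFAM(PBDEF) for BSON ones; both set Lrecl only when a definition
+   is actually supplied, as shown in cmgfam.cpp above.)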
*/ /***********************************************************************/ @@ -20,6 +20,9 @@ class DllExport CMGFAM : public DOSFAM { public: // Constructor CMGFAM(PJDEF tdp); +#if defined(BSON_SUPPORT) + CMGFAM(PBDEF tdp); +#endif // BSON_SUPPORT CMGFAM(PCMGFAM txfp); // Implementation diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index 242e68b5905..e42d9703ad7 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -79,8 +79,7 @@ COLBLK::COLBLK(PCOL col1, PTDB tdbp) if (trace(2)) htrc(" copying COLBLK %s from %p to %p\n", Name, col1, this); - if (tdbp) - { + if (tdbp) { // Attach the new column to the table block if (!tdbp->GetColumns()) tdbp->SetColumns(this); @@ -90,6 +89,7 @@ COLBLK::COLBLK(PCOL col1, PTDB tdbp) colp->Next = this; } // endelse } + } // end of COLBLK copy constructor /***********************************************************************/ diff --git a/storage/connect/colblk.h b/storage/connect/colblk.h index b22933d9ebb..51ab32cfae2 100644 --- a/storage/connect/colblk.h +++ b/storage/connect/colblk.h @@ -62,7 +62,7 @@ class DllExport COLBLK : public XOBJECT { bool IsVirtual(void) {return Cdp->IsVirtual();} bool IsNullable(void) {return Nullable;} void SetNullable(bool b) {Nullable = b;} - + void SetName(PSZ name_var) { Name= name_var; } // Methods virtual void Reset(void); virtual bool Compare(PXOB xp); diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 3b58e8b5a8f..ee62e0cd03e 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -73,8 +73,7 @@ PGLOBAL CntExit(PGLOBAL g) g->Activityp = NULL; } // endif Activityp - PlugExit(g); - g= NULL; + g= PlugExit(g); } // endif g return g; @@ -295,9 +294,9 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, /* its column blocks in mode write (required by XML tables). */ /*******************************************************************/ if (mode == MODE_UPDATE) { - PTDBASE utp; + PTDB utp; - if (!(utp = (PTDBASE)tdbp->Duplicate(g))) { + if (!(utp = tdbp->Duplicate(g))) { sprintf(g->Message, MSG(INV_UPDT_TABLE), tdbp->GetName()); throw 4; } // endif tp @@ -592,7 +591,7 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort) if (!tdbp->IsRemote()) { // Make all the eventual indexes - PTDBDOS tbxp = (PTDBDOS)tdbp; + PTDBASE tbxp = (PTDBASE)tdbp; tbxp->ResetKindex(g, NULL); tbxp->SetKey_Col(NULL); rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1); diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 53150f9d8ae..f50290119ae 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -5,7 +5,7 @@ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -102,7 +102,7 @@ int MAPFAM::GetFileLength(PGLOBAL g) bool MAPFAM::OpenTableFile(PGLOBAL g) { char filename[_MAX_PATH]; - int len; + size_t len; MODE mode = Tdbp->GetMode(); PFBLOCK fp; PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr; @@ -170,13 +170,18 @@ bool MAPFAM::OpenTableFile(PGLOBAL g) htrc("CreateFileMap: %s\n", g->Message); return (mode == MODE_READ && rc == ENOENT) - ? PushWarning(g, Tdbp) : true; + ? false : true; +// ? 
PushWarning(g, Tdbp) : true; --> assert fails in MariaDB
 } // endif hFile
 
   /*******************************************************************/
-  /* Get the file size (assuming file is smaller than 4 GB) */
+  /* Get the file size. */
   /*******************************************************************/
-  len = mm.lenL;
+  len = (size_t)mm.lenL;
+
+  if (mm.lenH)
+    len += ((size_t)mm.lenH * 0x100000000LL);
+
   Memory = (char *)mm.memory;
 
   if (!len) { // Empty or deleted file
diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp
index 67ab120c499..84eab272cc5 100644
--- a/storage/connect/filamtxt.cpp
+++ b/storage/connect/filamtxt.cpp
@@ -1,11 +1,11 @@
 /*********** File AM Txt C++ Program Source Code File (.CPP) ***********/
 /* PROGRAM NAME: FILAMTXT */
 /* ------------- */
-/* Version 1.7 */
+/* Version 1.8 */
 /* */
 /* COPYRIGHT: */
 /* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */
 /* */
 /* WHAT THIS PROGRAM DOES: */
 /* ----------------------- */
@@ -48,6 +48,7 @@
 #include "plgdbsem.h"
 #include "filamtxt.h"
 #include "tabdos.h"
+#include "tabjson.h"
 
 #if defined(UNIX) || defined(UNIV_LINUX)
 #include "osutil.h"
@@ -804,14 +805,14 @@ int DOSFAM::ReadBuffer(PGLOBAL g)
     Placed = false;
 
   if (trace(2))
-    htrc(" About to read: stream=%p To_Buf=%p Buflen=%d\n",
-         Stream, To_Buf, Buflen);
+    htrc(" About to read: stream=%p To_Buf=%p Buflen=%d Fpos=%d\n",
+         Stream, To_Buf, Buflen, Fpos);
 
   if (fgets(To_Buf, Buflen, Stream)) {
     p = To_Buf + strlen(To_Buf) - 1;
 
     if (trace(2))
-      htrc(" Read: To_Buf=%p p=%c\n", To_Buf, To_Buf, p);
+      htrc(" Read: To_Buf=%p p=%c\n", To_Buf, *p);
 
 #if defined(__WIN__)
     if (Bin) {
@@ -1663,3 +1664,456 @@ void BLKFAM::Rewind(void)
 //Rbuf = 0; commented out in case we reuse last read block
 } // end of Rewind
 
+/* --------------------------- Class BINFAM -------------------------- */
+
+#if 0
+/***********************************************************************/
+/* BIN GetFileLength: returns file size in number of bytes. */
+/***********************************************************************/
+int BINFAM::GetFileLength(PGLOBAL g)
+{
+  int len;
+
+  if (!Stream)
+    len = TXTFAM::GetFileLength(g);
+  else
+    if ((len = _filelength(_fileno(Stream))) < 0)
+      sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", To_File);
+
+  xtrc(1, "File length=%d\n", len);
+  return len;
+} // end of GetFileLength
+
+/***********************************************************************/
+/* Cardinality: returns table cardinality in number of rows. */
+/* This function can be called with a null argument to test the */
+/* availability of Cardinality implementation (1 yes, 0 no). */
+/***********************************************************************/
+int BINFAM::Cardinality(PGLOBAL g)
+{
+  return (g) ? -1 : 0;
+} // end of Cardinality
+
+/***********************************************************************/
+/* OpenTableFile: Open a DOS/UNIX table file using C standard I/Os.
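+   (Added note: the stream is opened in binary mode -- "rb"/"wb" below --
+   because BIN records are length-prefixed binary data rather than
+   newline-terminated text lines.)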
*/
+/***********************************************************************/
+bool BINFAM::OpenTableFile(PGLOBAL g) {
+  char opmode[4], filename[_MAX_PATH];
+  MODE mode = Tdbp->GetMode();
+  PDBUSER dbuserp = PlgGetUser(g);
+
+  switch (mode) {
+  case MODE_READ:
+    strcpy(opmode, "rb");
+    break;
+  case MODE_WRITE:
+    strcpy(opmode, "wb");
+    break;
+  default:
+    sprintf(g->Message, MSG(BAD_OPEN_MODE), mode);
+    return true;
+  } // endswitch Mode
+
+  // Now open the file stream
+  PlugSetPath(filename, To_File, Tdbp->GetPath());
+
+  if (!(Stream = PlugOpenFile(g, filename, opmode))) {
+    if (trace(1))
+      htrc("%s\n", g->Message);
+
+    return (mode == MODE_READ && errno == ENOENT)
+      ? PushWarning(g, Tdbp) : true;
+  } // endif Stream
+
+  if (trace(1))
+    htrc("File %s open Stream=%p mode=%s\n", filename, Stream, opmode);
+
+  To_Fb = dbuserp->Openlist; // Keep track of File block
+
+  /*********************************************************************/
+  /* Allocate the line buffer. */
+  /*********************************************************************/
+  return AllocateBuffer(g);
+} // end of OpenTableFile
+#endif // 0
+
+/***********************************************************************/
+/* Allocate the line buffer. For mode Delete a bigger buffer has to */
+/* be allocated because it is also used to move lines into the file. */
+/***********************************************************************/
+bool BINFAM::AllocateBuffer(PGLOBAL g)
+{
+  MODE mode = Tdbp->GetMode();
+
+  // Lrecl is Ok
+  Buflen = Lrecl;
+
+  // Buffer will be allocated separately
+  if (mode == MODE_ANY) {
+    xtrc(1, "SubAllocating a buffer of %d bytes\n", Buflen);
+    To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen);
+  } else if (UseTemp || mode == MODE_DELETE) {
+    // Have a big buffer to move lines
+    Dbflen = Buflen * DOS_BUFF_LEN;
+    DelBuf = PlugSubAlloc(g, NULL, Dbflen);
+  } // endif mode
+
+  return false;
+#if 0
+  MODE mode = Tdbp->GetMode();
+
+  // Lrecl is Ok
+  Dbflen = Buflen = Lrecl;
+
+  if (trace(1))
+    htrc("SubAllocating a buffer of %d bytes\n", Buflen);
+
+  DelBuf = To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen);
+  return false;
+#endif // 0
+} // end of AllocateBuffer
+
+#if 0
+/***********************************************************************/
+/* GetRowID: return the RowID of last read record. */
+/***********************************************************************/
+int BINFAM::GetRowID(void) {
+  return Rows;
+} // end of GetRowID
+
+/***********************************************************************/
+/* GetPos: return the position of last read record. */
+/***********************************************************************/
+int BINFAM::GetPos(void) {
+  return Fpos;
+} // end of GetPos
+
+/***********************************************************************/
+/* GetNextPos: return the position of next record. */
+/***********************************************************************/
+int BINFAM::GetNextPos(void) {
+  return ftell(Stream);
+} // end of GetNextPos
+
+/***********************************************************************/
+/* SetPos: Reposition the table at the specified position.
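+   (Added note: on success Placed is set, so the following ReadBuffer
+   call skips RecordPos and reads from the newly seeked position.)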
*/
+/***********************************************************************/
+bool BINFAM::SetPos(PGLOBAL g, int pos) {
+  Fpos = pos;
+
+  if (fseek(Stream, Fpos, SEEK_SET)) {
+    sprintf(g->Message, MSG(FSETPOS_ERROR), Fpos);
+    return true;
+  } // endif
+
+  Placed = true;
+  return false;
+} // end of SetPos
+
+/***********************************************************************/
+/* Record file position in case of UPDATE or DELETE. */
+/***********************************************************************/
+bool BINFAM::RecordPos(PGLOBAL g) {
+  if ((Fpos = ftell(Stream)) < 0) {
+    sprintf(g->Message, MSG(FTELL_ERROR), 0, strerror(errno));
+    // strcat(g->Message, " (possible wrong ENDING option value)");
+    return true;
+  } // endif Fpos
+
+  return false;
+} // end of RecordPos
+#endif // 0
+
+/***********************************************************************/
+/* ReadBuffer: Read one record from the binary file. */
+/***********************************************************************/
+int BINFAM::ReadBuffer(PGLOBAL g)
+{
+  int rc;
+
+  if (!Stream)
+    return RC_EF;
+
+  xtrc(2, "ReadBuffer: Tdbp=%p To_Line=%p Placed=%d\n",
+    Tdbp, Tdbp->GetLine(), Placed);
+
+  if (!Placed) {
+    /*******************************************************************/
+    /* Record file position in case of UPDATE or DELETE. */
+    /*******************************************************************/
+    if (RecordPos(g))
+      return RC_FX;
+
+    CurBlk = (int)Rows++;
+    xtrc(2, "ReadBuffer: CurBlk=%d\n", CurBlk);
+  } else
+    Placed = false;
+
+  xtrc(2, " About to read: bstream=%p To_Buf=%p Buflen=%d Fpos=%d\n",
+    Stream, To_Buf, Buflen, Fpos);
+
+  // Read the prefix giving the row length
+  if (!fread(&Recsize, sizeof(size_t), 1, Stream)) {
+    if (!feof(Stream)) {
+      strcpy(g->Message, "Error reading line prefix\n");
+      return RC_FX;
+    } else
+      return RC_EF;
+
+  } else if (Recsize > (unsigned)Buflen) {
+    sprintf(g->Message, "Record too big (Recsize=%zd Buflen=%d)\n", Recsize, Buflen);
+    return RC_FX;
+  } // endif Recsize
+
+  if (fread(To_Buf, Recsize, 1, Stream)) {
+    xtrc(2, " Read: To_Buf=%p Recsize=%zd\n", To_Buf, Recsize);
+    num_read++;
+    rc = RC_OK;
+  } else if (feof(Stream)) {
+    rc = RC_EF;
+  } else {
+#if defined(__WIN__)
+    sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL));
+#else
+    sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(0));
+#endif
+    xtrc(2, "%s\n", g->Message);
+    rc = RC_FX;
+  } // endif's fread
+
+  xtrc(2, "ReadBuffer: rc=%d\n", rc);
+  IsRead = true;
+  return rc;
+} // end of ReadBuffer
+
+/***********************************************************************/
+/* WriteBuffer: File write routine for BIN access method. */
+/***********************************************************************/
+int BINFAM::WriteBuffer(PGLOBAL g)
+{
+  int curpos = 0;
+  bool moved = true;
+
+  // T_Stream is the temporary stream or the table file stream itself
+  if (!T_Stream) {
+    if (UseTemp && Tdbp->GetMode() == MODE_UPDATE) {
+      if (OpenTempFile(g))
+        return RC_FX;
+
+    } else
+      T_Stream = Stream;
+
+  } // endif T_Stream
+
+  if (Tdbp->GetMode() == MODE_UPDATE) {
+    /*******************************************************************/
+    /* Here we simply rewrite a record on itself. There are two cases */
+    /* where another method should be used, a/ when the update applies */
+    /* to the whole file, b/ when updating the last field of a variable */
+    /* length file. The method could be to rewrite a new file, then */
+    /* to erase the old one and rename the new updated file.
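+   (Added note on the on-disk layout used by ReadBuffer above and the
+   fwrite calls below: each row is stored as a size_t length prefix
+   immediately followed by that many payload bytes,
+      [Recsize][Recsize bytes of record data]
+   so records may vary in length, and a record longer than Buflen is
+   rejected when read back.)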
*/ + /*******************************************************************/ + curpos = ftell(Stream); + + if (trace(1)) + htrc("Last : %d cur: %d\n", Fpos, curpos); + + if (UseTemp) { + /*****************************************************************/ + /* We are using a temporary file. */ + /* Before writing the updated record, we must eventually copy */ + /* all the intermediate records that have not been updated. */ + /*****************************************************************/ + if (MoveIntermediateLines(g, &moved)) + return RC_FX; + + Spos = curpos; // New start position + } else + // Update is directly written back into the file, + // with this (fast) method, record size cannot change. + if (fseek(Stream, Fpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), 0); + return RC_FX; + } // endif + + } // endif mode + + /*********************************************************************/ + /* Prepare writing the line. */ + /*********************************************************************/ +//memcpy(To_Buf, Tdbp->GetLine(), Recsize); + + /*********************************************************************/ + /* Now start the writing process. */ + /*********************************************************************/ + if (fwrite(&Recsize, sizeof(size_t), 1, T_Stream) != 1) { + sprintf(g->Message, "Error %d writing prefix to %s", + errno, To_File); + return RC_FX; + } else if (fwrite(To_Buf, Recsize, 1, T_Stream) != 1) { + sprintf(g->Message, "Error %d writing %zd bytes to %s", + errno, Recsize, To_File); + return RC_FX; + } // endif fwrite + + if (Tdbp->GetMode() == MODE_UPDATE && moved) + if (fseek(Stream, curpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSEEK_ERROR), strerror(errno)); + return RC_FX; + } // endif + + xtrc(1, "Binary write done\n"); + return RC_OK; +} // end of WriteBuffer + +#if 0 +/***********************************************************************/ +/* Data Base delete line routine for DOS and BLK access methods. */ +/***********************************************************************/ +int DOSFAM::DeleteRecords(PGLOBAL g, int irc) +{ + bool moved; + int curpos = ftell(Stream); + + /*********************************************************************/ + /* There is an alternative here: */ + /* 1 - use a temporary file in which are copied all not deleted */ + /* lines, at the end the original file will be deleted and */ + /* the temporary file renamed to the original file name. */ + /* 2 - directly move the not deleted lines inside the original */ + /* file, and at the end erase all trailing records. */ + /* This will be experimented. */ + /*********************************************************************/ + if (trace(1)) + htrc( + "DOS DeleteDB: rc=%d UseTemp=%d curpos=%d Fpos=%d Tpos=%d Spos=%d\n", + irc, UseTemp, curpos, Fpos, Tpos, Spos); + + if (irc != RC_OK) { + /*******************************************************************/ + /* EOF: position Fpos at the end-of-file position. */ + /*******************************************************************/ + fseek(Stream, 0, SEEK_END); + Fpos = ftell(Stream); + + if (trace(1)) + htrc("Fpos placed at file end=%d\n", Fpos); + + } // endif irc + + if (Tpos == Spos) { + /*******************************************************************/ + /* First line to delete, Open temporary file. 
*/ + /*******************************************************************/ + if (UseTemp) { + if (OpenTempFile(g)) + return RC_FX; + + } else { + /*****************************************************************/ + /* Move of eventual preceding lines is not required here. */ + /* Set the target file as being the source file itself. */ + /* Set the future Tpos, and give Spos a value to block copying. */ + /*****************************************************************/ + T_Stream = Stream; + Spos = Tpos = Fpos; + } // endif UseTemp + + } // endif Tpos == Spos + + /*********************************************************************/ + /* Move any intermediate lines. */ + /*********************************************************************/ + if (MoveIntermediateLines(g, &moved)) + return RC_FX; + + if (irc == RC_OK) { + /*******************************************************************/ + /* Reposition the file pointer and set Spos. */ + /*******************************************************************/ + if (!UseTemp || moved) + if (fseek(Stream, curpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), 0); + return RC_FX; + } // endif + + Spos = GetNextPos(); // New start position + + if (trace(1)) + htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); + + } else { + /*******************************************************************/ + /* Last call after EOF has been reached. */ + /* The UseTemp case is treated in CloseTableFile. */ + /*******************************************************************/ + if (!UseTemp & !Abort) { + /*****************************************************************/ + /* Because the chsize functionality is only accessible with a */ + /* system call we must close the file and reopen it with the */ + /* open function (_fopen for MS ??) this is still to be checked */ + /* for compatibility with Text files and other OS's. */ + /*****************************************************************/ + char filename[_MAX_PATH]; + int h; // File handle, return code + + PlugSetPath(filename, To_File, Tdbp->GetPath()); + /*rc=*/ PlugCloseFile(g, To_Fb); + + if ((h= global_open(g, MSGID_OPEN_STRERROR, filename, O_WRONLY)) <= 0) + return RC_FX; + + /*****************************************************************/ + /* Remove extra records. */ + /*****************************************************************/ +#if defined(__WIN__) + if (chsize(h, Tpos)) { + sprintf(g->Message, MSG(CHSIZE_ERROR), strerror(errno)); + close(h); + return RC_FX; + } // endif +#else + if (ftruncate(h, (off_t)Tpos)) { + sprintf(g->Message, MSG(TRUNCATE_ERROR), strerror(errno)); + close(h); + return RC_FX; + } // endif +#endif + + close(h); + + if (trace(1)) + htrc("done, h=%d irc=%d\n", h, irc); + + } // endif !UseTemp + + } // endif irc + + return RC_OK; // All is correct +} // end of DeleteRecords + +/***********************************************************************/ +/* Table file close routine for DOS access method. */ +/***********************************************************************/ +void BINFAM::CloseTableFile(PGLOBAL g, bool abort) +{ + int rc; + + Abort = abort; + rc = PlugCloseFile(g, To_Fb); + xtrc(1, "BIN Close: closing %s rc=%d\n", To_File, rc); + Stream = NULL; // So we can know whether table is open +} // end of CloseTableFile + +/***********************************************************************/ +/* Rewind routine for BIN access method. 
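+   (Added note: this and the other #if 0 blocks above are drafts; in the
+   BINFAM declaration in filamtxt.h below, the corresponding virtual
+   methods are commented out, so BINFAM still inherits them from DOSFAM
+   and only overrides AllocateBuffer, ReadBuffer, WriteBuffer and a few
+   trivial ones.)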
*/ +/***********************************************************************/ +void BINFAM::Rewind(void) +{ + if (Stream) // Can be NULL when making index on void table + rewind(Stream); + + Rows = 0; + OldBlk = CurBlk = -1; +} // end of Rewind +#endif // 0 diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h index 1fdae8fcd37..353e06ad3bd 100644 --- a/storage/connect/filamtxt.h +++ b/storage/connect/filamtxt.h @@ -1,7 +1,7 @@ /************** FilAMTxt H Declares Source Code File (.H) **************/ -/* Name: FILAMTXT.H Version 1.3 */ +/* Name: FILAMTXT.H Version 1.4 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */ /* */ /* This file contains the file access method classes declares. */ /***********************************************************************/ @@ -15,6 +15,7 @@ typedef class TXTFAM *PTXF; typedef class DOSFAM *PDOSFAM; typedef class BLKFAM *PBLKFAM; +typedef class BINFAM *PBINFAM; typedef class DOSDEF *PDOSDEF; typedef class TDBDOS *PTDBDOS; @@ -210,4 +211,44 @@ class DllExport BLKFAM : public DOSFAM { bool Closing; // True when closing on Update }; // end of class BLKFAM +/***********************************************************************/ +/* This is the DOS/UNIX Access Method class declaration for binary */ +/* files with variable record format (BJSON) */ +/***********************************************************************/ +class DllExport BINFAM : public DOSFAM { +public: + // Constructor + BINFAM(PDOSDEF tdp) : DOSFAM(tdp) {Recsize = 0;} + BINFAM(PBINFAM txfp) : DOSFAM(txfp) {Recsize = txfp->Recsize;} + + // Implementation + virtual AMT GetAmType(void) {return TYPE_AM_BIN;} +//virtual int GetPos(void); +//virtual int GetNextPos(void); + virtual PTXF Duplicate(PGLOBAL g) { return (PTXF)new(g) BINFAM(this); } + + // Methods +//virtual void Reset(void) {TXTFAM::Reset();} +//virtual int GetFileLength(PGLOBAL g); +//virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s) {return s;} + virtual bool AllocateBuffer(PGLOBAL g); +//virtual int GetRowID(void); +//virtual bool RecordPos(PGLOBAL g); +//virtual bool SetPos(PGLOBAL g, int recpos); + virtual int SkipRecord(PGLOBAL g, bool header) {return RC_OK;} +//virtual bool OpenTableFile(PGLOBAL g); + virtual int ReadBuffer(PGLOBAL g); + virtual int WriteBuffer(PGLOBAL g); +//virtual int DeleteRecords(PGLOBAL g, int irc); +//virtual void CloseTableFile(PGLOBAL g, bool abort); +//virtual void Rewind(void); + +//protected: +//virtual int InitDelete(PGLOBAL g, int fpos, int spos); + + // Members + size_t Recsize; // Length of last read or next written record +}; // end of class BINFAM + #endif // __FILAMTXT_H diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index 49283f8c0c7..97f29dddc7e 100644 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -5,7 +5,7 @@ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -1328,7 +1328,7 @@ VCMFAM::VCMFAM(PVCMFAM txfp) : VCTFAM(txfp) bool VCMFAM::OpenTableFile(PGLOBAL g) { char filename[_MAX_PATH]; - int len; + size_t len; MODE mode = Tdbp->GetMode(); PFBLOCK fp = NULL; PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr; @@ -1422,10 +1422,14 @@ bool VCMFAM::OpenTableFile(PGLOBAL g) } // endif hFile 
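+      // (Added note: mm.lenL and mm.lenH hold the low and high 32 bits
+      // of the mapped file size, so the high half must be scaled by
+      // 2^32 -- 0x100000000 -- when building the 64-bit length below.)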
/*******************************************************************/
-  /* Get the file size (assuming file is smaller than 4 GB) */
+  /* Get the file size. */
   /*******************************************************************/
-  len = mm.lenL;
-  Memory = (char *)mm.memory;
+  len = (size_t)mm.lenL;
+
+  if (mm.lenH)
+    len += ((size_t)mm.lenH * 0x100000000LL);
+
+  Memory = (char *)mm.memory;
 
   if (!len) { // Empty or deleted file
     CloseFileHandle(hFile);
@@ -2763,7 +2767,7 @@ bool VMPFAM::OpenTableFile(PGLOBAL g)
 bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i)
 {
   char filename[_MAX_PATH];
-  int len;
+  size_t len;
   HANDLE hFile;
   MEMMAP mm;
   PFBLOCK fp;
@@ -2817,8 +2821,12 @@ bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i)
     /*****************************************************************/
     /* Get the file size (assuming file is smaller than 4 GB) */
     /*****************************************************************/
-    len = mm.lenL;
-    Memcol[i] = (char *)mm.memory;
+    len = (size_t)mm.lenL;
+
+    if (mm.lenH)
+      len += ((size_t)mm.lenH * 0x100000000LL);
+
+    Memcol[i] = (char *)mm.memory;
 
     if (!len) { // Empty or deleted file
       CloseFileHandle(hFile);
@@ -4110,7 +4118,8 @@ bool BGVFAM::CleanUnusedSpace(PGLOBAL g)
   } else {
     int req;
 
-    memset(To_Buf, 0, Buflen);
+    if (To_Buf)
+      memset(To_Buf, 0, Buflen);
 
     for (n = Fpos - Tpos; n > 0; n -= req) {
       /*****************************************************************/
diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp
index eb14e846120..79599382693 100644
--- a/storage/connect/filamzip.cpp
+++ b/storage/connect/filamzip.cpp
@@ -154,10 +154,10 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
   strcpy(filename, pat);
 
 #if defined(__WIN__)
+  int rc;
   char drive[_MAX_DRIVE], direc[_MAX_DIR];
   WIN32_FIND_DATA FileData;
   HANDLE hSearch;
-  int rc;
 
   _splitpath(filename, drive, direc, NULL, NULL);
@@ -1207,7 +1207,7 @@ int UZDFAM::Cardinality(PGLOBAL g)
     return 1;
 
   int card = -1;
-  int len = GetFileLength(g);
+  GetFileLength(g);
 
   card = Records;
diff --git a/storage/connect/global.h b/storage/connect/global.h
index d17620861fa..8774285e54b 100644
--- a/storage/connect/global.h
+++ b/storage/connect/global.h
@@ -185,7 +185,7 @@ typedef struct _global { /* Global structure */
   size_t Sarea_Size; /* Work area size */
   PACTIVITY Activityp;
   char Message[MAX_STR]; /* Message (result, error, trace) */
-  ulong More; /* Used by jsonudf */
+  size_t More; /* Used by jsonudf */
   size_t Saved_Size; /* Saved work area to_free */
   bool Createas; /* To pass multi to ext tables */
   void *Xchk; /* indexes in create/alter */
@@ -208,7 +208,7 @@ DllExport char *PlugGetMessage(PGLOBAL, int);
 DllExport short GetLineLength(PGLOBAL); // Console line length
 #endif // __WIN__
 DllExport PGLOBAL PlugInit(LPCSTR, size_t); // Plug global initialization
-DllExport int PlugExit(PGLOBAL); // Plug global termination
+DllExport PGLOBAL PlugExit(PGLOBAL); // Plug global termination
 DllExport LPSTR PlugRemoveType(LPSTR, LPCSTR);
 DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR prefix, LPCSTR name, LPCSTR dir);
 DllExport BOOL PlugIsAbsolutePath(LPCSTR path);
@@ -220,30 +220,11 @@ DllExport char *PlugDup(PGLOBAL g, const char *str);
 DllExport void htrc(char const *fmt, ...);
 DllExport void xtrc(uint, char const* fmt, ...);
 DllExport uint GetTraceValue(void);
+DllExport void* MakePtr(void* memp, size_t offset);
+DllExport size_t MakeOff(void* memp, void* ptr);
 
 #if defined(__cplusplus)
 } // extern "C"
 #endif
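+// (Added note: the inline MakePtr/MakeOff definitions removed below are
+// superseded by the DllExport declarations added above; the routines are
+// now defined in a single implementation file instead, not shown in this
+// diff.)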
-/***********************************************************************/ -/* Inline routine definitions. */ -/***********************************************************************/ -/***********************************************************************/ -/* This routine makes a pointer from an offset to a memory pointer. */ -/***********************************************************************/ -inline void* MakePtr(void* memp, size_t offset) { - // return ((offset == 0) ? NULL : &((char*)memp)[offset]); - return (!offset) ? NULL : (char *)memp + offset; -} /* end of MakePtr */ - -/***********************************************************************/ -/* This routine makes an offset from a pointer new format. */ -/***********************************************************************/ -inline size_t MakeOff(void* memp, void* ptr) { -#if defined(_DEBUG) - assert(ptr > memp); -#endif // _DEBUG - return ((!ptr) ? 0 : (size_t)((char*)ptr - (size_t)memp)); -} /* end of MakeOff */ - /*-------------------------- End of Global.H --------------------------*/ diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index f8bf8804246..0be262f6a63 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 October 18, 2020"; + char version[]= "Version 1.07.0002 January 27, 2021"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -230,6 +230,9 @@ char *GetUserVariable(PGLOBAL g, const uchar *varname) PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); PQRYRES VirColumns(PGLOBAL g, bool info); PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info); +#ifdef BSON_SUPPORT +PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info); +#endif // BSON_SUPPORT PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info); #if defined(REST_SUPPORT) PQRYRES RESTColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); @@ -251,11 +254,15 @@ bool ExactInfo(void); USETEMP UseTemp(void); int GetConvSize(void); TYPCONV GetTypeConv(void); +int GetDefaultDepth(void); +int GetDefaultPrec(void); bool JsonAllPath(void); char *GetJsonNull(void); -int GetDefaultDepth(void); uint GetJsonGrpSize(void); char *GetJavaWrapper(void); +#if defined(BSON_SUPPORT) +bool Force_Bson(void); +#endif // BSON_SUPPORT size_t GetWorkSize(void); void SetWorkSize(size_t); extern "C" const char *msglang(void); @@ -279,7 +286,12 @@ static char *strz(PGLOBAL g, LEX_CSTRING &ls) { char *str= (char*)PlugSubAlloc(g, NULL, ls.length + 1); - memcpy(str, ls.str, ls.length); + /* + ls.str can be NULL, for example when called with + create_info->connect_string + */ + if (ls.str) + memcpy(str, ls.str, ls.length); str[ls.length]= 0; return str; } // end of strz @@ -397,7 +409,7 @@ static MYSQL_THDVAR_ENUM( // Adding JPATH to all Json table columns static MYSQL_THDVAR_BOOL(json_all_path, PLUGIN_VAR_RQCMDARG, "Adding JPATH to all Json table columns", - NULL, NULL, 0); // NO by default + NULL, NULL, 1); // YES by default // Null representation for JSON values static MYSQL_THDVAR_STR(json_null, @@ -410,11 +422,17 @@ static MYSQL_THDVAR_STR(json_null, static MYSQL_THDVAR_INT(default_depth, PLUGIN_VAR_RQCMDARG, "Default depth used by Json, XML and Mongo discovery", - NULL, NULL, 0, -1, 16, 1); + NULL, NULL, 5, -1, 16, 1); // Defaults to 5 + +// Default precision for doubles 
+static MYSQL_THDVAR_INT(default_prec, + PLUGIN_VAR_RQCMDARG, + "Default precision used for doubles", + NULL, NULL, 6, 0, 16, 1); // Defaults to 6 // Estimate max number of rows for JSON aggregate functions static MYSQL_THDVAR_UINT(json_grp_size, - PLUGIN_VAR_RQCMDARG, // opt + PLUGIN_VAR_RQCMDARG, // opt "max number of rows for JSON aggregate functions.", NULL, NULL, JSONMAX, 1, INT_MAX, 1); @@ -439,6 +457,13 @@ static MYSQL_THDVAR_BOOL(enable_mongo, PLUGIN_VAR_RQCMDARG, #endif // !version 2,3 #endif // JAVA_SUPPORT || CMGO_SUPPORT +#if defined(BSON_SUPPORT) +// Force using BSON for JSON tables +static MYSQL_THDVAR_BOOL(force_bson, PLUGIN_VAR_RQCMDARG, + "Force using BSON for JSON tables", + NULL, NULL, 0); // NO by default +#endif // BSON_SUPPORT + #if defined(XMSG) || defined(NEWMSG) const char *language_names[]= { @@ -480,6 +505,7 @@ TYPCONV GetTypeConv(void) {return (TYPCONV)THDVAR(current_thd, type_conv);} char *GetJsonNull(void) {return connect_hton ? THDVAR(current_thd, json_null) : NULL;} int GetDefaultDepth(void) {return THDVAR(current_thd, default_depth);} +int GetDefaultPrec(void) {return THDVAR(current_thd, default_prec);} uint GetJsonGrpSize(void) {return connect_hton ? THDVAR(current_thd, json_grp_size) : 10;} size_t GetWorkSize(void) {return (size_t)THDVAR(current_thd, work_size);} @@ -501,6 +527,10 @@ char *GetJavaWrapper(void) bool MongoEnabled(void) {return THDVAR(current_thd, enable_mongo);} #endif // JAVA_SUPPORT || CMGO_SUPPORT +#if defined(BSON_SUPPORT) +bool Force_Bson(void) {return THDVAR(current_thd, force_bson);} +#endif // BSON_SUPPORT) + #if defined(XMSG) || defined(NEWMSG) extern "C" const char *msglang(void) {return language_names[THDVAR(current_thd, msg_lang)];} @@ -1051,12 +1081,12 @@ static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp) /****************************************************************************/ TABTYPE ha_connect::GetRealType(PTOS pos) { - TABTYPE type; + TABTYPE type= TAB_UNDEF; if (pos || (pos= GetTableOptionStruct())) { type= GetTypeID(pos->type); - if (type == TAB_UNDEF) + if (type == TAB_UNDEF && !pos->http) type= pos->srcdef ? TAB_MYSQL : pos->tabname ? TAB_PRX : TAB_DOS; #if defined(REST_SUPPORT) else if (pos->http) @@ -1064,7 +1094,8 @@ TABTYPE ha_connect::GetRealType(PTOS pos) case TAB_JSON: case TAB_XML: case TAB_CSV: - type = TAB_REST; + case TAB_UNDEF: + type = TAB_REST; break; case TAB_REST: type = TAB_NIY; @@ -1074,8 +1105,7 @@ TABTYPE ha_connect::GetRealType(PTOS pos) } // endswitch type #endif // REST_SUPPORT - } else - type= TAB_UNDEF; + } // endif pos return type; } // end of GetRealType @@ -1387,7 +1417,7 @@ PCSZ ha_connect::GetStringOption(PCSZ opname, PCSZ sdef) PTOS options= GetTableOptionStruct(); if (!stricmp(opname, "Connect")) { - LEX_CSTRING cnc= (tshp) ? tshp->connect_string + LEX_CSTRING cnc= (tshp) ? tshp->connect_string : table->s->connect_string; if (cnc.length) @@ -1573,6 +1603,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) // Now get column information pcf->Name= (char*)fp->field_name.str; + chset = (char*)fp->charset()->name; if (fop && fop->special) { pcf->Fieldfmt= (char*)fop->special; @@ -1583,8 +1614,15 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) pcf->Scale= 0; pcf->Opt= (fop) ? (int)fop->opt : 0; - if ((pcf->Length= fp->field_length) < 0) - pcf->Length= 256; // BLOB? 
+ if (fp->field_length >= 0) { + pcf->Length = fp->field_length; + + // length is bytes for Connect, not characters + if (!strnicmp(chset, "utf8", 4)) + pcf->Length /= 3; + + } else + pcf->Length= 256; // BLOB? pcf->Precision= pcf->Length; @@ -1601,8 +1639,6 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) pcf->Fieldfmt= NULL; } // endif fop - chset= (char *)fp->charset()->name; - if (!strcmp(chset, "binary")) v = 'B'; // Binary string @@ -2156,7 +2192,6 @@ int ha_connect::MakeRecord(char *buf) int rc= 0; Field* *field; Field *fp; - my_bitmap_map *org_bitmap; CHARSET_INFO *charset= tdbp->data_charset(); //MY_BITMAP readmap; MY_BITMAP *map; @@ -2170,7 +2205,7 @@ int ha_connect::MakeRecord(char *buf) *table->def_read_set.bitmap, *table->def_write_set.bitmap); // Avoid asserts in field::store() for columns that are not updated - org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set); // This is for variable_length rows memset(buf, 0, table->s->null_bytes); @@ -2197,7 +2232,7 @@ int ha_connect::MakeRecord(char *buf) continue; htrc("Column %s not found\n", fp->field_name.str); - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); DBUG_RETURN(HA_ERR_WRONG_IN_RECORD); } // endif colp @@ -2257,7 +2292,7 @@ int ha_connect::MakeRecord(char *buf) sprintf(buf, "Out of range value %.140s for column '%s' at row %ld", value->GetCharString(val), - fp->field_name.str, + fp->field_name.str, thd->get_stmt_da()->current_row_for_warning()); push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, buf); @@ -2280,7 +2315,7 @@ int ha_connect::MakeRecord(char *buf) memcpy(buf, table->record[0], table->s->stored_rec_length); // This is copied from ha_tina and is necessary to avoid asserts - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); DBUG_RETURN(rc); } // end of MakeRecord @@ -2300,7 +2335,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *) //PTDBASE tp= (PTDBASE)tdbp; String attribute(attr_buffer, sizeof(attr_buffer), table->s->table_charset); - my_bitmap_map *bmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *bmap= dbug_tmp_use_all_columns(table, &table->read_set); const CHARSET_INFO *charset= tdbp->data_charset(); String data_charset_value(data_buffer, sizeof(data_buffer), charset); @@ -2422,7 +2457,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *) } // endfor field err: - dbug_tmp_restore_column_map(table->read_set, bmap); + dbug_tmp_restore_column_map(&table->read_set, bmap); return rc; } // end of ScanRecord @@ -2470,7 +2505,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, OPVAL op; Field *fp; const key_range *ranges[2]; - my_bitmap_map *old_map; + MY_BITMAP *old_map; KEY *kfp; KEY_PART_INFO *kpart; @@ -2487,7 +2522,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, both= ranges[0] && ranges[1]; kfp= &table->key_info[active_index]; - old_map= dbug_tmp_use_all_columns(table, table->write_set); + old_map= dbug_tmp_use_all_columns(table, &table->write_set); for (i= 0; i <= 1; i++) { if (ranges[i] == NULL) @@ -2582,11 +2617,11 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, if ((oom= qry->IsTruncated())) strcpy(g->Message, "Out of memory"); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); 
return oom; err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return true; } // end of MakeKeyWhere @@ -2799,7 +2834,6 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) } else { char buff[256]; String *res, tmp(buff, sizeof(buff), &my_charset_bin); - Item_basic_constant *pval= (Item_basic_constant *)args[i]; PPARM pp= (PPARM)PlugSubAlloc(g, NULL, sizeof(PARM)); // IN and BETWEEN clauses should be col VOP list @@ -2808,6 +2842,8 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) switch (args[i]->real_type()) { case COND::CONST_ITEM: + { + Item *pval= (Item *)args[i]; switch (args[i]->cmp_type()) { case STRING_RESULT: res= pval->val_str(&tmp); @@ -2834,6 +2870,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) DBUG_ASSERT(0); return NULL; } + } break; case COND::CACHE_ITEM: // Possible ??? case COND::NULL_ITEM: // TODO: handle this @@ -3089,7 +3126,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) } else { char buff[256]; String *res, tmp(buff, sizeof(buff), &my_charset_bin); - Item_basic_constant *pval= (Item_basic_constant *)args[i]; + Item *pval= (Item *)args[i]; Item::Type type= args[i]->real_type(); switch (type) { @@ -4501,7 +4538,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool case TAB_VEC: case TAB_REST: case TAB_JSON: - if (options->filename && *options->filename) { +#if defined(BSON_SUPPORT) + case TAB_BSON: +#endif // BSON_SUPPORT + if (options->filename && *options->filename) { if (!quick) { char path[FN_REFLEN], dbpath[FN_REFLEN]; @@ -4532,11 +4572,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool case TAB_DIR: case TAB_ZIP: case TAB_OEM: - if (table && table->pos_in_table_list) { // if SELECT + if (table && table->pos_in_table_list) { // if SELECT #if MYSQL_VERSION_ID > 100200 Switch_to_definer_security_ctx backup_ctx(thd, table->pos_in_table_list); #endif // VERSION_ID > 100200 - return check_global_access(thd, FILE_ACL); } else return check_global_access(thd, FILE_ACL); @@ -4552,9 +4591,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool case TAB_OCCUR: case TAB_PIVOT: case TAB_VIR: + default: // This is temporary until a solution is found return false; - } // endswitch type + } // endswitch type my_printf_error(ER_UNKNOWN_ERROR, "check_privileges failed", MYF(0)); return true; @@ -4805,6 +4845,7 @@ int ha_connect::start_stmt(THD *thd, thr_lock_type lock_type) lock.cc by lock_external() and unlock_external() in lock.cc; the section "locking functions for mysql" in lock.cc; copy_data_between_tables() in sql_table.cc. 
+ */ int ha_connect::external_lock(THD *thd, int lock_type) { @@ -4937,11 +4978,11 @@ int ha_connect::external_lock(THD *thd, int lock_type) // Here we do make the new indexes if (tdp->MakeIndex(g, adp, true) == RC_FX) { // Make it a warning to avoid crash - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, - 0, g->Message); - rc= 0; - //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - //rc= HA_ERR_INTERNAL_ERROR; + //push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + // 0, g->Message); + //rc= 0; + my_message(ER_TOO_MANY_KEYS, g->Message, MYF(0)); + rc= HA_ERR_INDEX_CORRUPT; } // endif MakeIndex } else if (tdbp->GetDef()->Indexable() == 3) { @@ -5351,7 +5392,8 @@ static char *encode(PGLOBAL g, const char *cnm) */ static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ, int len, int dec, char* key, uint tm, const char* rem, - char* dft, char* xtra, char* fmt, int flag, bool dbf, char v) { + char* dft, char* xtra, char* fmt, int flag, bool dbf, char v) +{ #if defined(DEVELOPMENT) // Some client programs regard CHAR(36) as GUID char var = (len > 255 || len == 36) ? 'V' : v; @@ -5428,7 +5470,10 @@ static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ, if (fmt && *fmt) { switch (ttp) { case TAB_JSON: error |= sql->append(" JPATH='"); break; - case TAB_XML: error |= sql->append(" XPATH='"); break; +#if defined(BSON_SUPPORT) + case TAB_BSON: error |= sql->append(" JPATH='"); break; +#endif // BSON_SUPPORT + case TAB_XML: error |= sql->append(" XPATH='"); break; default: error |= sql->append(" FIELD_FORMAT='"); } // endswitch ttp @@ -5593,8 +5638,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd, String sql(buf, sizeof(buf), system_charset_info); sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info); - user = host = pwd = tbl = src = col = ocl = pic = fcl = skc = rnk = zfn = NULL; - dsn = url = NULL; + user= host= pwd= tbl= src= col= ocl= pic= fcl= skc= rnk= zfn= NULL; + dsn= url= NULL; // Get the useful create options ttp= GetTypeID(topt->type); @@ -5655,7 +5700,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, try { // Check table type - if (ttp == TAB_UNDEF) { + if (ttp == TAB_UNDEF && !topt->http) { topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS"; ttp= GetTypeID(topt->type); sprintf(g->Message, "No table_type. Was set to %s", topt->type); @@ -5666,11 +5711,21 @@ static int connect_assisted_discovery(handlerton *, THD* thd, goto err; #if defined(REST_SUPPORT) } else if (topt->http) { - switch (ttp) { + if (ttp == TAB_UNDEF) { + topt->type = "JSON"; + ttp= GetTypeID(topt->type); + sprintf(g->Message, "No table_type. 
Was set to %s", topt->type); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + } // endif ttp + + switch (ttp) { case TAB_JSON: - case TAB_XML: +#if defined(BSON_SUPPORT) + case TAB_BSON: +#endif // BSON_SUPPORT + case TAB_XML: case TAB_CSV: - ttp = TAB_REST; + ttp = TAB_REST; break; default: break; @@ -5853,7 +5908,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd, case TAB_XML: #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT case TAB_JSON: - dsn= strz(g, create_info->connect_string); +#if defined(BSON_SUPPORT) + case TAB_BSON: +#endif // BSON_SUPPORT + dsn= strz(g, create_info->connect_string); if (!fn && !zfn && !mul && !dsn) sprintf(g->Message, "Missing %s file name", topt->type); @@ -6017,8 +6075,15 @@ static int connect_assisted_discovery(handlerton *, THD* thd, qrp= VirColumns(g, fnc == FNC_COL); break; case TAB_JSON: +#if !defined(FORCE_BSON) qrp= JSONColumns(g, db, dsn, topt, fnc == FNC_COL); break; +#endif // !FORCE_BSON +#if defined(BSON_SUPPORT) + case TAB_BSON: + qrp= BSONColumns(g, db, dsn, topt, fnc == FNC_COL); + break; +#endif // BSON_SUPPORT #if defined(JAVA_SUPPORT) case TAB_MONGO: url= strz(g, create_info->connect_string); @@ -6083,6 +6148,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd, goto err; } // endif !nblin + // Restore language type + if (ttp == TAB_REST) + ttp = GetTypeID(topt->type); + for (i= 0; !rc && i < qrp->Nblin; i++) { typ= len= prec= dec= flg= 0; tm= NOT_NULL_FLAG; @@ -6258,7 +6327,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, // Now add the field if (add_field(&sql, ttp, cnm, typ, prec, dec, key, tm, rem, dft, xtra, - fmt, flg, dbf, v)) + fmt, flg, dbf, v)) rc= HA_ERR_OUT_OF_MEM; } // endfor i @@ -6382,6 +6451,9 @@ int ha_connect::create(const char *name, TABLE *table_arg, // Check table type if (type == TAB_UNDEF) { options->type= (options->srcdef) ? "MYSQL" : +#if defined(REST_SUPPORT) + (options->http) ? "JSON" : +#endif // REST_SUPPORT (options->tabname) ? "PROXY" : "DOS"; type= GetTypeID(options->type); sprintf(g->Message, "No table_type. 
Will be set to %s", options->type); @@ -6399,7 +6471,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, DBUG_RETURN(HA_ERR_INTERNAL_ERROR); inward= IsFileType(type) && !options->filename && - (type != TAB_JSON || !cnc.length); + ((type != TAB_JSON && type != TAB_BSON) || !cnc.length); if (options->data_charset) { const CHARSET_INFO *data_charset; @@ -6757,8 +6829,8 @@ int ha_connect::create(const char *name, TABLE *table_arg, if (trace(1)) htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas); -#if defined(ZIP_SUPPORT) if (options->zipped) { +#if defined(ZIP_SUPPORT) // Check whether the zip entry must be made from a file PCSZ fn= GetListOption(g, "Load", options->oplist, NULL); @@ -6780,9 +6852,11 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endif LoadFile } // endif fn - +#else // !ZIP_SUPPORT + my_message(ER_UNKNOWN_ERROR, "Option ZIP not supported", MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); +#endif // !ZIP_SUPPORT } // endif zipped -#endif // ZIP_SUPPORT // To check whether indexes have to be made or remade if (!g->Xchk) { @@ -7384,7 +7458,8 @@ static struct st_mysql_sys_var* connect_system_variables[]= { MYSQL_SYSVAR(json_null), MYSQL_SYSVAR(json_all_path), MYSQL_SYSVAR(default_depth), - MYSQL_SYSVAR(json_grp_size), + MYSQL_SYSVAR(default_prec), + MYSQL_SYSVAR(json_grp_size), #if defined(JAVA_SUPPORT) MYSQL_SYSVAR(jvm_path), MYSQL_SYSVAR(class_path), @@ -7394,7 +7469,10 @@ static struct st_mysql_sys_var* connect_system_variables[]= { MYSQL_SYSVAR(enable_mongo), #endif // JAVA_SUPPORT || CMGO_SUPPORT MYSQL_SYSVAR(cond_push), - NULL +#if defined(BSON_SUPPORT) + MYSQL_SYSVAR(force_bson), +#endif // BSON_SUPPORT + NULL }; maria_declare_plugin(connect) diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index 2dab385a36f..2cb75e0adc1 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -766,6 +766,7 @@ void JDBConn::AddJars(PSTRG jpop, char sep) /***********************************************************************/ bool JDBConn::Connect(PJPARM sop) { + int irc = RC_FX; bool err = false; jint rc; jboolean jt = (trace(1)); diff --git a/storage/connect/jmgfam.cpp b/storage/connect/jmgfam.cpp index 30f6279146d..2d45753ec63 100644 --- a/storage/connect/jmgfam.cpp +++ b/storage/connect/jmgfam.cpp @@ -1,15 +1,15 @@ /************ JMONGO FAM C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: jmgfam.cpp */ /* ------------- */ -/* Version 1.0 */ +/* Version 1.1 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 20017 */ +/* (C) Copyright to the author Olivier BERTRAND 20017 - 2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ -/* This program are the Java MongoDB access method classes. */ +/* This program are the Java MongoDB access method classes. 
*/ /* */ /***********************************************************************/ @@ -49,7 +49,11 @@ #include "reldef.h" #include "filamtxt.h" #include "tabdos.h" +#if defined(BSON_SUPPORT) +#include "tabbson.h" +#else #include "tabjson.h" +#endif // BSON_SUPPORT #include "jmgfam.h" #if defined(UNIX) || defined(UNIV_LINUX) @@ -92,10 +96,38 @@ JMGFAM::JMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL) Version = tdp->Version; Lrecl = tdp->Lrecl + tdp->Ending; Curpos = 0; -} // end of JMGFAM standard constructor +} // end of JMGFAM Json standard constructor + +#if defined(BSON_SUPPORT) +JMGFAM::JMGFAM(PBDEF tdp) : DOSFAM((PDOSDEF)NULL) +{ + Jcp = NULL; + Ops.Driver = tdp->Schema; + Ops.Url = tdp->Uri; + Ops.User = NULL; + Ops.Pwd = NULL; + Ops.Scrollable = false; + Ops.Fsize = 0; + Ops.Version = tdp->Version; + To_Fbt = NULL; + Mode = MODE_ANY; + Uristr = tdp->Uri; + Db_name = tdp->Schema; + Coll_name = tdp->Collname; + Options = tdp->Options; + Filter = tdp->Filter; + Wrapname = tdp->Wrapname; + Done = false; + Pipe = tdp->Pipe; + Version = tdp->Version; + Lrecl = tdp->Lrecl + tdp->Ending; + Curpos = 0; +} // end of JMGFAM Bson standard constructor +#endif // BSON_SUPPORT JMGFAM::JMGFAM(PJMGFAM tdfp) : DOSFAM(tdfp) { + Jcp = tdfp->Jcp; //Client = tdfp->Client; //Database = NULL; //Collection = tdfp->Collection; @@ -114,6 +146,7 @@ JMGFAM::JMGFAM(PJMGFAM tdfp) : DOSFAM(tdfp) Done = tdfp->Done; Pipe = tdfp->Pipe; Version = tdfp->Version; + Curpos = tdfp->Curpos; } // end of JMGFAM copy constructor /***********************************************************************/ diff --git a/storage/connect/jmgfam.h b/storage/connect/jmgfam.h index 5c80d993833..c5d9d1f57e6 100644 --- a/storage/connect/jmgfam.h +++ b/storage/connect/jmgfam.h @@ -1,7 +1,7 @@ /************** MongoFam H Declares Source Code File (.H) **************/ -/* Name: jmgfam.h Version 1.0 */ +/* Name: jmgfam.h Version 1.1 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */ /* */ /* This file contains the JAVA MongoDB access method classes declares */ /***********************************************************************/ @@ -25,6 +25,9 @@ class DllExport JMGFAM : public DOSFAM { public: // Constructor JMGFAM(PJDEF tdp); +#if defined(BSON_SUPPORT) + JMGFAM(PBDEF tdp); +#endif // BSON_SUPPORT JMGFAM(PJMGFAM txfp); // Implementation diff --git a/storage/connect/jmgoconn.cpp b/storage/connect/jmgoconn.cpp index c80800bd897..8a12fffbd05 100644 --- a/storage/connect/jmgoconn.cpp +++ b/storage/connect/jmgoconn.cpp @@ -121,7 +121,7 @@ JMgoConn::JMgoConn(PGLOBAL g, PCSZ collname, PCSZ wrapper) /***********************************************************************/ void JMgoConn::AddJars(PSTRG jpop, char sep) { -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) if (m_Version == 2) { jpop->Append(sep); // jpop->Append("C:/Eclipse/workspace/MongoWrap2/bin"); @@ -134,7 +134,7 @@ void JMgoConn::AddJars(PSTRG jpop, char sep) jpop->Append(sep); jpop->Append("C:/mongo-java-driver/mongo-java-driver-3.4.2.jar"); } // endif m_Version -#endif // DEVELOPMENT +#endif // BSON_SUPPORT } // end of AddJars /***********************************************************************/ diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index f6dca8146d6..bd9c4fac7a1 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1,7 +1,7 @@ /*************** json CPP Declares Source Code File (.H) ***************/ -/* Name: json.cpp Version 1.4 */ +/* Name: json.cpp 
Version 1.5 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */ /* */ /* This file contains the JSON classes functions. */ /***********************************************************************/ @@ -21,7 +21,7 @@ #include "plgdbsem.h" #include "json.h" -#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0) +#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0) #if defined(__WIN__) #define EL "\r\n" @@ -38,16 +38,16 @@ class SE_Exception { public: - SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {} - ~SE_Exception() {} + SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {} + ~SE_Exception() {} - unsigned int nSE; - PEXCEPTION_RECORD eRec; + unsigned int nSE; + PEXCEPTION_RECORD eRec; }; // end of class SE_Exception void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp) { - throw SE_Exception(u, pExp->ExceptionRecord); + throw SE_Exception(u, pExp->ExceptionRecord); } // end of trans_func char *GetExceptionDesc(PGLOBAL g, unsigned int e); @@ -58,46 +58,60 @@ char *GetJsonNull(void); /***********************************************************************/ /* IsNum: check whether this string is all digits. */ /***********************************************************************/ -bool IsNum(PSZ s) -{ - for (char *p = s; *p; p++) - if (*p == ']') - break; - else if (!isdigit(*p) || *p == '-') - return false; +bool IsNum(PSZ s) { + for (char* p = s; *p; p++) + if (*p == ']') + break; + else if (!isdigit(*p) || *p == '-') + return false; - return true; -} // end of IsNum + return true; +} // end of IsNum /***********************************************************************/ /* NextChr: return the first found '[' or Sep pointer. */ /***********************************************************************/ -char *NextChr(PSZ s, char sep) +char* NextChr(PSZ s, char sep) { - char *p1 = strchr(s, '['); - char *p2 = strchr(s, sep); + char* p1 = strchr(s, '['); + char* p2 = strchr(s, sep); - if (!p2) - return p1; - else if (p1) - return MY_MIN(p1, p2); + if (!p2) + return p1; + else if (p1) + return MY_MIN(p1, p2); - return p2; -} // end of NextChr + return p2; +} // end of NextChr +#if 0 +/***********************************************************************/ +/* Allocate a VAL structure, make sure common field and Nd are zeroed. */ +/***********************************************************************/ +PVL AllocVal(PGLOBAL g, JTYP type) +{ + PVL vlp = (PVL)PlugSubAlloc(g, NULL, sizeof(VAL)); + + vlp->LLn = 0; + vlp->Nd = 0; + vlp->Type = type; + return vlp; +} // end of AllocVal +#endif // 0 /***********************************************************************/ /* Parse a json string. */ /* Note: when pretty is not known, the caller set pretty to 3. */ /***********************************************************************/ -PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) +PJSON ParseJson(PGLOBAL g, char* s, size_t len, int* ptyp, bool* comma) { - int i, pretty = (ptyp) ? *ptyp : 3; - bool b = false, pty[3] = {true,true,true}; - PJSON jsp = NULL, jp = NULL; + int i, pretty = (ptyp) ? 
*ptyp : 3; + bool b = false, pty[3] = { true,true,true }; + PJSON jsp = NULL; + PJDOC jdp = NULL; - if (trace(1)) - htrc("ParseJson: s=%.10s len=%d\n", s, len); + if (trace(1)) + htrc("ParseJson: s=%.10s len=%zd\n", s, len); if (!s || !len) { strcpy(g->Message, "Void JSON object"); @@ -105,116 +119,402 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) } else if (comma) *comma = false; - // Trying to guess the pretty format - if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n'))) - pty[0] = false; - - try { - jp = new(g) JSON(); - jp->s = s; - jp->len = len; - jp->pty = pty; - - for (i = 0; i < jp->len; i++) - switch (s[i]) { - case '[': - if (jsp) - jsp = jp->ParseAsArray(g, i, pretty, ptyp); - else - jsp = jp->ParseArray(g, ++i); - - break; - case '{': - if (jsp) - jsp = jp->ParseAsArray(g, i, pretty, ptyp); - else if (!(jsp = jp->ParseObject(g, ++i))) - throw 2; - - break; - case ' ': - case '\t': - case '\n': - case '\r': - break; - case ',': - if (jsp && (pretty == 1 || pretty == 3)) { - if (comma) - *comma = true; - - pty[0] = pty[2] = false; - break; - } // endif pretty - - sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); - throw 3; - case '(': - b = true; - break; - case ')': - if (b) { - b = false; - break; - } // endif b - /* falls through */ - default: - if (jsp) - jsp = jp->ParseAsArray(g, i, pretty, ptyp); - else if (!(jsp = jp->ParseValue(g, i))) - throw 4; - - break; - }; // endswitch s[i] - - if (!jsp) - sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN(len, 50), s); - else if (ptyp && pretty == 3) { - *ptyp = 3; // Not recognized pretty - - for (i = 0; i < 3; i++) - if (pty[i]) { - *ptyp = i; - break; - } // endif pty - - } // endif ptyp - - } catch (int n) { - if (trace(1)) - htrc("Exception %d: %s\n", n, g->Message); - jsp = NULL; - } catch (const char *msg) { - strcpy(g->Message, msg); - jsp = NULL; - } // end catch - - return jsp; + // Trying to guess the pretty format + if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n'))) + pty[0] = false; + + try { + jdp = new(g) JDOC; + jdp->s = s; + jdp->len = len; + jdp->pty = pty; + + for (i = 0; i < jdp->len; i++) + switch (s[i]) { + case '[': + if (jsp) + jsp = jdp->ParseAsArray(g, i, pretty, ptyp); + else + jsp = jdp->ParseArray(g, ++i); + + break; + case '{': + if (jsp) + jsp = jdp->ParseAsArray(g, i, pretty, ptyp); + else if (!(jsp = jdp->ParseObject(g, ++i))) + throw 2; + + break; + case ' ': + case '\t': + case '\n': + case '\r': + break; + case ',': + if (jsp && (pretty == 1 || pretty == 3)) { + if (comma) + *comma = true; + + pty[0] = pty[2] = false; + break; + } // endif pretty + + sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); + throw 3; + case '(': + b = true; + break; + case ')': + if (b) { + b = false; + break; + } // endif b + /* falls through */ + default: + if (jsp) + jsp = jdp->ParseAsArray(g, i, pretty, ptyp); + else if (!(jsp = jdp->ParseValue(g, i))) + throw 4; + + break; + }; // endswitch s[i] + + if (!jsp) + sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s); + else if (ptyp && pretty == 3) { + *ptyp = 3; // Not recognized pretty + + for (i = 0; i < 3; i++) + if (pty[i]) { + *ptyp = i; + break; + } // endif pty + + } // endif ptyp + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + jsp = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + jsp = NULL; + } // end catch + + return jsp; } // end of ParseJson 
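Two details in the new ParseJson. First, len becomes size_t, which is why the ARGS macro grows an (int) cast: the '*' precision of a %.*s conversion is consumed as int, so passing a size_t through the varargs would be undefined on LP64 platforms. A standalone illustration:

    #include <cstdio>
    #include <cstring>

    int main()
    {
      const char *s= "abcdefghij";
      size_t len= strlen(s);
      // The '*' precision argument of printf is read as int,
      // so a size_t length must be cast explicitly:
      printf("%.*s\n", (int)len - 3, s);   // prints "abcdefg"
      return 0;
    }

Second, the pretty-format guess is unchanged in spirit: pty[] starts all true and entries are falsified as evidence arrives (top-level commas rule out pretty 0 and 2, a leading '[' followed by a newline rules out 0), and when the caller passed pretty == 3 the first surviving level wins:

    *ptyp= 3;                          // "not recognized"
    for (i= 0; i < 3; i++)
      if (pty[i]) { *ptyp= i; break; } // first still-plausible candidate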
/***********************************************************************/ +/* Serialize a JSON document tree: */ +/***********************************************************************/ +PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty) { + PSZ str = NULL; + bool b = false, err = true; + JOUT* jp; + FILE* fs = NULL; + PJDOC jdp = NULL; + + g->Message[0] = 0; + + try { + jdp = new(g) JDOC; // MUST BE ALLOCATED BEFORE jp !!!!! + + if (!jsp) { + strcpy(g->Message, "Null json tree"); + throw 1; + } else if (!fn) { + // Serialize to a string + jp = new(g) JOUTSTR(g); + b = pretty == 1; + } else { + if (!(fs = fopen(fn, "wb"))) { + sprintf(g->Message, MSG(OPEN_MODE_ERROR), + "w", (int)errno, fn); + strcat(strcat(g->Message, ": "), strerror(errno)); + throw 2; + } else if (pretty >= 2) { + // Serialize to a pretty file + jp = new(g)JOUTPRT(g, fs); + } else { + // Serialize to a flat file + b = true; + jp = new(g)JOUTFILE(g, fs, pretty); + } // endif's + + } // endif's + + jdp->SetJp(jp); + + switch (jsp->GetType()) { + case TYPE_JAR: + err = jdp->SerializeArray((PJAR)jsp, b); + break; + case TYPE_JOB: + err = ((b && jp->Prty()) && jp->WriteChr('\t')); + err |= jdp->SerializeObject((PJOB)jsp); + break; + case TYPE_JVAL: + err = jdp->SerializeValue((PJVAL)jsp); + break; + default: + strcpy(g->Message, "Invalid json tree"); + } // endswitch Type + + if (fs) { + fputs(EL, fs); + fclose(fs); + str = (err) ? NULL : strcpy(g->Message, "Ok"); + } else if (!err) { + str = ((JOUTSTR*)jp)->Strp; + jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); + } else { + if (!g->Message[0]) + strcpy(g->Message, "Error in Serialize"); + + } // endif's + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + str = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + str = NULL; + } // end catch + + return str; +} // end of Serialize + + +/* -------------------------- Class JOUTSTR -------------------------- */ + +/***********************************************************************/ +/* JOUTSTR constructor. */ +/***********************************************************************/ +JOUTSTR::JOUTSTR(PGLOBAL g) : JOUT(g) { + PPOOLHEADER pph = (PPOOLHEADER)g->Sarea; + + N = 0; + Max = pph->FreeBlk; + Max = (Max > 32) ? Max - 32 : Max; + Strp = (char*)PlugSubAlloc(g, NULL, 0); // Size not know yet +} // end of JOUTSTR constructor + +/***********************************************************************/ +/* Concatenate a string to the Serialize string. */ +/***********************************************************************/ +bool JOUTSTR::WriteStr(const char* s) { + if (s) { + size_t len = strlen(s); + + if (N + len > Max) + return true; + + memcpy(Strp + N, s, len); + N += len; + return false; + } else + return true; + +} // end of WriteStr + +/***********************************************************************/ +/* Concatenate a character to the Serialize string. */ +/***********************************************************************/ +bool JOUTSTR::WriteChr(const char c) { + if (N + 1 > Max) + return true; + + Strp[N++] = c; + return false; +} // end of WriteChr + +/***********************************************************************/ +/* Escape and Concatenate a string to the Serialize string. 
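Serialize() is now a thin wrapper that instantiates the new JDOC driver and hands it a JOUT sink chosen from the arguments: fn == NULL serializes into a sub-allocated string (JOUTSTR), a filename with pretty >= 2 produces an indented file (JOUTPRT), anything else a flat file (JOUTFILE). Note the hunk's own warning that the JDOC must be allocated before the sink: JOUTSTR sizes itself from the arena's remaining free block and grows in place, so a later PlugSubAlloc would carve memory out of the same region. Usage, following the return conventions visible above (NULL plus g->Message on failure, the literal "Ok" for file output):

    // jsp comes from ParseJson; g is the CONNECT PGLOBAL work area.
    PSZ str= Serialize(g, jsp, NULL, 0);             // in-memory string
    if (!str)
      htrc("Serialize failed: %s\n", g->Message);

    PSZ ok= Serialize(g, jsp, (char*)"out.json", 2); // pretty file, "Ok"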
*/ +/***********************************************************************/ +bool JOUTSTR::Escape(const char* s) +{ + if (s) { + WriteChr('"'); + + for (unsigned int i = 0; s[i]; i++) + switch (s[i]) { + case '"': + case '\\': + case '\t': + case '\n': + case '\r': + case '\b': + case '\f': WriteChr('\\'); + // fall through + default: + WriteChr(s[i]); + break; + } // endswitch s[i] + + WriteChr('"'); + } else + WriteStr("null"); + + return false; +} // end of Escape + +/* ------------------------- Class JOUTFILE -------------------------- */ + +/***********************************************************************/ +/* Write a string to the Serialize file. */ +/***********************************************************************/ +bool JOUTFILE::WriteStr(const char* s) +{ + // This is temporary + fputs(s, Stream); + return false; +} // end of WriteStr + +/***********************************************************************/ +/* Write a character to the Serialize file. */ +/***********************************************************************/ +bool JOUTFILE::WriteChr(const char c) +{ + // This is temporary + fputc(c, Stream); + return false; +} // end of WriteChr + +/***********************************************************************/ +/* Escape and Concatenate a string to the Serialize string. */ +/***********************************************************************/ +bool JOUTFILE::Escape(const char* s) +{ + // This is temporary + if (s) { + fputc('"', Stream); + + for (unsigned int i = 0; s[i]; i++) + switch (s[i]) { + case '"': fputs("\\\"", Stream); break; + case '\\': fputs("\\\\", Stream); break; + case '\t': fputs("\\t", Stream); break; + case '\n': fputs("\\n", Stream); break; + case '\r': fputs("\\r", Stream); break; + case '\b': fputs("\\b", Stream); break; + case '\f': fputs("\\f", Stream); break; + default: + fputc(s[i], Stream); + break; + } // endswitch s[i] + + fputc('"', Stream); + } else + fputs("null", Stream); + + return false; +} // end of Escape + +/* ------------------------- Class JOUTPRT --------------------------- */ + +/***********************************************************************/ +/* Write a string to the Serialize pretty file. */ +/***********************************************************************/ +bool JOUTPRT::WriteStr(const char* s) +{ + // This is temporary + if (B) { + fputs(EL, Stream); + M--; + + for (int i = 0; i < M; i++) + fputc('\t', Stream); + + B = false; + } // endif B + + fputs(s, Stream); + return false; +} // end of WriteStr + +/***********************************************************************/ +/* Write a character to the Serialize pretty file. 
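JOUTSTR::Escape also becomes null-safe here (a NULL argument now writes the literal null, matching JOUTFILE::Escape in the same hunk; the old version, removed further down, dereferenced s unconditionally). The escape set itself is the minimal seven-character JSON table; anything else, including other control characters below 0x20, is passed through verbatim, which a strict JSON consumer may reject. The table, as a self-contained helper:

    // The escape set used by the writers above; NULL means "emit the
    // character itself". \uXXXX escapes for other control characters
    // are not generated.
    static const char *escape_of(char c)
    {
      switch (c) {
        case '"':  return "\\\"";
        case '\\': return "\\\\";
        case '\t': return "\\t";
        case '\n': return "\\n";
        case '\r': return "\\r";
        case '\b': return "\\b";
        case '\f': return "\\f";
        default:   return NULL;
      }
    }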
*/ +/***********************************************************************/ +bool JOUTPRT::WriteChr(const char c) +{ + switch (c) { + case ':': + fputs(": ", Stream); + break; + case '{': + case '[': +#if 0 + if (M) + fputs(EL, Stream); + + for (int i = 0; i < M; i++) + fputc('\t', Stream); +#endif // 0 + + fputc(c, Stream); + fputs(EL, Stream); + M++; + + for (int i = 0; i < M; i++) + fputc('\t', Stream); + + break; + case '}': + case ']': + M--; + fputs(EL, Stream); + + for (int i = 0; i < M; i++) + fputc('\t', Stream); + + fputc(c, Stream); + B = true; + break; + case ',': + fputc(c, Stream); + fputs(EL, Stream); + + for (int i = 0; i < M; i++) + fputc('\t', Stream); + + B = false; + break; + default: + fputc(c, Stream); + } // endswitch c + + return false; +} // end of WriteChr + +/* --------------------------- Class JDOC ---------------------------- */ + +/***********************************************************************/ /* Parse several items as being in an array. */ /***********************************************************************/ -PJAR JSON::ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp) +PJAR JDOC::ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp) { - if (pty[0] && (!pretty || pretty > 2)) { - PJAR jsp; + if (pty[0] && (!pretty || pretty > 2)) { + PJAR jsp; - if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3) - *ptyp = (pty[0]) ? 0 : 3; + if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3) + *ptyp = (pty[0]) ? 0 : 3; - return jsp; - } else - strcpy(g->Message, "More than one item in file"); + return jsp; + } else + strcpy(g->Message, "More than one item in file"); - return NULL; + return NULL; } // end of ParseAsArray /***********************************************************************/ /* Parse a JSON Array. */ /***********************************************************************/ -PJAR JSON::ParseArray(PGLOBAL g, int& i) +PJAR JDOC::ParseArray(PGLOBAL g, int& i) { - int level = 0; - bool b = (!i); + int level = 0; + bool b = (!i); PJAR jarp = new(g) JARRAY; for (; i < len; i++) @@ -235,11 +535,11 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i) jarp->InitArray(g); return jarp; - case '\n': - if (!b) - pty[0] = pty[1] = false; - case '\r': - case ' ': + case '\n': + if (!b) + pty[0] = pty[1] = false; + case '\r': + case ' ': case '\t': break; default: @@ -247,17 +547,17 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i) sprintf(g->Message, "Unexpected value near %.*s", ARGS); throw 1; } else - jarp->AddValue(g, ParseValue(g, i)); + jarp->AddArrayValue(g, ParseValue(g, i)); level = (b) ? 1 : 2; break; }; // endswitch s[i] - if (b) { - // Case of Pretty == 0 - jarp->InitArray(g); - return jarp; - } // endif b + if (b) { + // Case of Pretty == 0 + jarp->InitArray(g); + return jarp; + } // endif b throw ("Unexpected EOF in array"); } // end of ParseArray @@ -265,10 +565,10 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i) /***********************************************************************/ /* Parse a JSON Object. 
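JOUTPRT::WriteChr above drives indentation entirely from single characters: '{' and '[' emit a line break and push the tab depth M, '}' and ']' pop it (with B deferring the closing brace's re-indent to the next WriteStr), ',' re-indents at the current depth, and ':' becomes ": ". Tracing those rules, a document like {"a":1,"b":[2]} comes out as:

    {
    	"a": 1,
    	"b": [
    		2
    	]
    }

(tabs shown literally; derived by walking WriteChr by hand, not captured from a run).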
*/ /***********************************************************************/ -PJOB JSON::ParseObject(PGLOBAL g, int& i) +PJOB JDOC::ParseObject(PGLOBAL g, int& i) { PSZ key; - int level = 0; + int level = -1; PJOB jobp = new(g) JOBJECT; PJPR jpp = NULL; @@ -276,7 +576,7 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i) switch (s[i]) { case '"': if (level < 2) { - key = ParseString(g, ++i); + key = ParseString(g, ++i); jpp = jobp->AddPair(g, key); level = 1; } else { @@ -287,7 +587,7 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i) break; case ':': if (level == 1) { - jpp->Val = ParseValue(g, ++i); + jpp->Val = ParseValue(g, ++i); level = 2; } else { sprintf(g->Message, "Unexpected ':' near %.*s", ARGS); @@ -304,16 +604,16 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i) break; case '}': - if (level < 2) { + if (level == 0 || level == 1) { sprintf(g->Message, "Unexpected '}' near %.*s", ARGS); throw 2; } // endif level return jobp; - case '\n': - pty[0] = pty[1] = false; - case '\r': - case ' ': + case '\n': + pty[0] = pty[1] = false; + case '\r': + case ' ': case '\t': break; default: @@ -329,38 +629,42 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i) /***********************************************************************/ /* Parse a JSON Value. */ /***********************************************************************/ -PJVAL JSON::ParseValue(PGLOBAL g, int& i) +PJVAL JDOC::ParseValue(PGLOBAL g, int& i) { - int n; PJVAL jvp = new(g) JVALUE; for (; i < len; i++) - switch (s[i]) { - case '\n': - pty[0] = pty[1] = false; - case '\r': - case ' ': - case '\t': - break; - default: - goto suite; - } // endswitch + switch (s[i]) { + case '\n': + pty[0] = pty[1] = false; + case '\r': + case ' ': + case '\t': + break; + default: + goto suite; + } // endswitch suite: switch (s[i]) { case '[': - jvp->Jsp = ParseArray(g, ++i); + jvp->Jsp = ParseArray(g, ++i); + jvp->DataType = TYPE_JSON; break; case '{': - jvp->Jsp = ParseObject(g, ++i); + jvp->Jsp = ParseObject(g, ++i); + jvp->DataType = TYPE_JSON; break; case '"': - jvp->Value = AllocateValue(g, ParseString(g, ++i), TYPE_STRING); +// jvp->Val = AllocVal(g, TYPE_STRG); + jvp->Strp = ParseString(g, ++i); + jvp->DataType = TYPE_STRG; break; case 't': if (!strncmp(s + i, "true", 4)) { - n = 1; - jvp->Value = AllocateValue(g, &n, TYPE_TINY); +// jvp->Val = AllocVal(g, TYPE_BOOL); + jvp->B = true; + jvp->DataType = TYPE_BOOL; i += 3; } else goto err; @@ -368,24 +672,26 @@ PJVAL JSON::ParseValue(PGLOBAL g, int& i) break; case 'f': if (!strncmp(s + i, "false", 5)) { - n = 0; - jvp->Value = AllocateValue(g, &n, TYPE_TINY); +// jvp->Val = AllocVal(g, TYPE_BOOL); + jvp->B = false; + jvp->DataType = TYPE_BOOL; i += 4; } else goto err; break; case 'n': - if (!strncmp(s + i, "null", 4)) + if (!strncmp(s + i, "null", 4)) { + jvp->DataType = TYPE_NULL; i += 3; - else + } else goto err; break; case '-': default: if (s[i] == '-' || isdigit(s[i])) - jvp->Value = ParseNumeric(g, i); + ParseNumeric(g, i, jvp); else goto err; @@ -401,7 +707,7 @@ err: /***********************************************************************/ /* Unescape and parse a JSON string. */ /***********************************************************************/ -char *JSON::ParseString(PGLOBAL g, int& i) +char *JDOC::ParseString(PGLOBAL g, int& i) { uchar *p; int n = 0; @@ -488,15 +794,15 @@ char *JSON::ParseString(PGLOBAL g, int& i) /***********************************************************************/ /* Parse a JSON numeric value. 
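ParseValue no longer allocates a CONNECT VALUE per scalar; it writes straight into the JVALUE, whose scalar members now behave as a tagged union with DataType as the tag. The shape implied by these hunks (json.h itself is outside this diff view, so the member layout is inferred, not quoted):

    // Inferred JVALUE payload: one member live at a time, selected by
    // DataType (TYPE_JSON, TYPE_STRG, TYPE_INTG, TYPE_BINT, TYPE_DBL,
    // TYPE_BOOL, TYPE_DTM, TYPE_NULL).
    union {
      PJSON     Jsp;    // TYPE_JSON: nested array or object
      char     *Strp;   // TYPE_STRG / TYPE_DTM
      int       N;      // TYPE_INTG
      long long LLn;    // TYPE_BINT
      double    F;      // TYPE_DBL
      bool      B;      // TYPE_BOOL
    };
    int  Nd;            // decimal count for TYPE_DBL
    JTYP DataType;      // the tag

Also note that ParseObject's level now starts at -1 and the error test becomes (level == 0 || level == 1), so a '}' immediately after '{' is accepted: empty objects parse, while '}' after a dangling key or comma still raises the error.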
*/ /***********************************************************************/ -PVAL JSON::ParseNumeric(PGLOBAL g, int& i) +void JDOC::ParseNumeric(PGLOBAL g, int& i, PJVAL vlp) { char buf[50]; int n = 0; short nd = 0; - bool has_dot = false; - bool has_e = false; - bool found_digit = false; - PVAL valp = NULL; + bool has_dot = false; + bool has_e = false; + bool found_digit = false; +//PVL vlp = NULL; for (; i < len; i++) { switch (s[i]) { @@ -545,15 +851,27 @@ PVAL JSON::ParseNumeric(PGLOBAL g, int& i) if (has_dot || has_e) { double dv = strtod(buf, NULL); - valp = AllocateValue(g, &dv, TYPE_DOUBLE, nd); +// vlp = AllocVal(g, TYPE_DBL); + vlp->F = dv; + vlp->Nd = nd; + vlp->DataType = TYPE_DBL; } else { long long iv = strtoll(buf, NULL, 10); - valp = AllocateValue(g, &iv, TYPE_BIGINT); + if (iv > INT_MAX32 || iv < INT_MIN32) { +// vlp = AllocVal(g, TYPE_BINT); + vlp->LLn = iv; + vlp->DataType = TYPE_BINT; + } else { +// vlp = AllocVal(g, TYPE_INTG); + vlp->N = (int)iv; + vlp->DataType = TYPE_INTG; + } // endif iv + } // endif has i--; // Unstack following character - return valp; + return; } else throw("No digit found"); @@ -562,137 +880,59 @@ PVAL JSON::ParseNumeric(PGLOBAL g, int& i) } // end of ParseNumeric /***********************************************************************/ -/* Serialize a JSON tree: */ -/***********************************************************************/ -PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) -{ - PSZ str = NULL; - bool b = false, err = true; - JOUT *jp; - FILE *fs = NULL; - - g->Message[0] = 0; - - try { - if (!jsp) { - strcpy(g->Message, "Null json tree"); - throw 1; - } else if (!fn) { - // Serialize to a string - jp = new(g) JOUTSTR(g); - b = pretty == 1; - } else { - if (!(fs = fopen(fn, "wb"))) { - sprintf(g->Message, MSG(OPEN_MODE_ERROR), - "w", (int)errno, fn); - strcat(strcat(g->Message, ": "), strerror(errno)); - throw 2; - } else if (pretty >= 2) { - // Serialize to a pretty file - jp = new(g)JOUTPRT(g, fs); - } else { - // Serialize to a flat file - b = true; - jp = new(g)JOUTFILE(g, fs, pretty); - } // endif's - - } // endif's - - switch (jsp->GetType()) { - case TYPE_JAR: - err = SerializeArray(jp, (PJAR)jsp, b); - break; - case TYPE_JOB: - err = ((b && jp->Prty()) && jp->WriteChr('\t')); - err |= SerializeObject(jp, (PJOB)jsp); - break; - case TYPE_JVAL: - err = SerializeValue(jp, (PJVAL)jsp); - break; - default: - strcpy(g->Message, "Invalid json tree"); - } // endswitch Type - - if (fs) { - fputs(EL, fs); - fclose(fs); - str = (err) ? NULL : strcpy(g->Message, "Ok"); - } else if (!err) { - str = ((JOUTSTR*)jp)->Strp; - jp->WriteChr('\0'); - PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); - } else { - if (!g->Message[0]) - strcpy(g->Message, "Error in Serialize"); - - } // endif's - - } catch (int n) { - if (trace(1)) - htrc("Exception %d: %s\n", n, g->Message); - str = NULL; - } catch (const char *msg) { - strcpy(g->Message, msg); - str = NULL; - } // end catch - - return str; -} // end of Serialize - -/***********************************************************************/ /* Serialize a JSON Array. 
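ParseNumeric above now fills the passed JVALUE directly and splits the integer case by range: values fitting 32 bits become TYPE_INTG (member N), anything wider TYPE_BINT (LLn), and anything with a dot or exponent TYPE_DBL with the digit count kept in Nd. The classification, condensed (INT_MAX32/INT_MIN32 are MariaDB's limit macros):

    long long iv= strtoll(buf, NULL, 10);

    if (iv > INT_MAX32 || iv < INT_MIN32) {
      vlp->LLn= iv;                 // wide integer
      vlp->DataType= TYPE_BINT;
    } else {
      vlp->N= (int)iv;              // fits in 32 bits
      vlp->DataType= TYPE_INTG;
    }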
*/ /***********************************************************************/ -bool SerializeArray(JOUT *js, PJAR jarp, bool b) +bool JDOC::SerializeArray(PJAR jarp, bool b) { bool first = true; - if (b) { - if (js->Prty()) { - if (js->WriteChr('[')) - return true; - else if (js->Prty() == 1 && (js->WriteStr(EL) || js->WriteChr('\t'))) - return true; + if (b) { + if (js->Prty()) { + if (js->WriteChr('[')) + return true; + else if (js->Prty() == 1 && (js->WriteStr(EL) || js->WriteChr('\t'))) + return true; - } // endif Prty + } // endif Prty - } else if (js->WriteChr('[')) - return true; + } else if (js->WriteChr('[')) + return true; for (int i = 0; i < jarp->size(); i++) { if (first) first = false; - else if ((!b || js->Prty()) && js->WriteChr(',')) + else if ((!b || js->Prty()) && js->WriteChr(',')) return true; - else if (b) { - if (js->Prty() < 2 && js->WriteStr(EL)) - return true; - else if (js->Prty() == 1 && js->WriteChr('\t')) - return true; + else if (b) { + if (js->Prty() < 2 && js->WriteStr(EL)) + return true; + else if (js->Prty() == 1 && js->WriteChr('\t')) + return true; - } // endif b + } // endif b - if (SerializeValue(js, jarp->GetValue(i))) + if (SerializeValue(jarp->GetArrayValue(i))) return true; } // endfor i - if (b && js->Prty() == 1 && js->WriteStr(EL)) + if (b && js->Prty() == 1 && js->WriteStr(EL)) return true; - return ((!b || js->Prty()) && js->WriteChr(']')); + return ((!b || js->Prty()) && js->WriteChr(']')); } // end of SerializeArray /***********************************************************************/ /* Serialize a JSON Object. */ /***********************************************************************/ -bool SerializeObject(JOUT *js, PJOB jobp) +bool JDOC::SerializeObject(PJOB jobp) { bool first = true; if (js->WriteChr('{')) return true; - for (PJPR pair = jobp->First; pair; pair = pair->Next) { + for (PJPR pair = jobp->GetFirst(); pair; pair = pair->Next) { if (first) first = false; else if (js->WriteChr(',')) @@ -702,7 +942,7 @@ bool SerializeObject(JOUT *js, PJOB jobp) js->WriteStr(pair->Key) || js->WriteChr('"') || js->WriteChr(':') || - SerializeValue(js, pair->Val)) + SerializeValue(pair->Val)) return true; } // endfor i @@ -713,259 +953,70 @@ bool SerializeObject(JOUT *js, PJOB jobp) /***********************************************************************/ /* Serialize a JSON Value. */ /***********************************************************************/ -bool SerializeValue(JOUT *js, PJVAL jvp) +bool JDOC::SerializeValue(PJVAL jvp) { + char buf[64]; PJAR jap; PJOB jop; - PVAL valp; + //PVL vlp; if ((jap = jvp->GetArray())) - return SerializeArray(js, jap, false); + return SerializeArray(jap, false); else if ((jop = jvp->GetObject())) - return SerializeObject(js, jop); - else if (!(valp = jvp->Value) || valp->IsNull()) - return js->WriteStr("null"); - else switch (valp->GetType()) { - case TYPE_TINY: - return js->WriteStr(valp->GetTinyValue() ? "true" : "false"); - case TYPE_STRING: - return js->Escape(valp->GetCharValue()); + return SerializeObject(jop); +//else if (!(vlp = jvp->Val)) +// return js->WriteStr("null"); + else switch (jvp->DataType) { + case TYPE_BOOL: + return js->WriteStr(jvp->B ? 
"true" : "false"); + case TYPE_STRG: + case TYPE_DTM: + return js->Escape(jvp->Strp); + case TYPE_INTG: + sprintf(buf, "%d", jvp->N); + return js->WriteStr(buf); + case TYPE_BINT: + sprintf(buf, "%lld", jvp->LLn); + return js->WriteStr(buf); + case TYPE_DBL: + sprintf(buf, "%.*lf", jvp->Nd, jvp->F); + return js->WriteStr(buf); + case TYPE_NULL: + return js->WriteStr("null"); default: - if (valp->IsTypeNum()) { - char buf[32]; - - return js->WriteStr(valp->GetCharString(buf)); - } // endif valp - - } // endswitch Type + return js->WriteStr("???"); // TODO + } // endswitch Type - strcpy(js->g->Message, "Unrecognized value"); - return true; + strcpy(js->g->Message, "Unrecognized value"); + return true; } // end of SerializeValue -/* -------------------------- Class JOUTSTR -------------------------- */ - -/***********************************************************************/ -/* JOUTSTR constructor. */ -/***********************************************************************/ -JOUTSTR::JOUTSTR(PGLOBAL g) : JOUT(g) -{ - PPOOLHEADER pph = (PPOOLHEADER)g->Sarea; - - N = 0; - Max = pph->FreeBlk; - Max = (Max > 32) ? Max - 32 : Max; - Strp = (char*)PlugSubAlloc(g, NULL, 0); // Size not know yet -} // end of JOUTSTR constructor - -/***********************************************************************/ -/* Concatenate a string to the Serialize string. */ -/***********************************************************************/ -bool JOUTSTR::WriteStr(const char *s) -{ - if (s) { - size_t len = strlen(s); - - if (N + len > Max) - return true; - - memcpy(Strp + N, s, len); - N += len; - return false; - } else - return true; - -} // end of WriteStr - -/***********************************************************************/ -/* Concatenate a character to the Serialize string. */ -/***********************************************************************/ -bool JOUTSTR::WriteChr(const char c) -{ - if (N + 1 > Max) - return true; - - Strp[N++] = c; - return false; -} // end of WriteChr - -/***********************************************************************/ -/* Escape and Concatenate a string to the Serialize string. */ -/***********************************************************************/ -bool JOUTSTR::Escape(const char *s) -{ - WriteChr('"'); - - for (unsigned int i = 0; s[i]; i++) - switch (s[i]) { - case '"': - case '\\': - case '\t': - case '\n': - case '\r': - case '\b': - case '\f': WriteChr('\\'); - // fall through - default: - WriteChr(s[i]); - break; - } // endswitch s[i] - - WriteChr('"'); - return false; -} // end of Escape - -/* ------------------------- Class JOUTFILE -------------------------- */ - -/***********************************************************************/ -/* Write a string to the Serialize file. */ -/***********************************************************************/ -bool JOUTFILE::WriteStr(const char *s) -{ - // This is temporary - fputs(s, Stream); - return false; -} // end of WriteStr - -/***********************************************************************/ -/* Write a character to the Serialize file. */ -/***********************************************************************/ -bool JOUTFILE::WriteChr(const char c) -{ - // This is temporary - fputc(c, Stream); - return false; -} // end of WriteChr - -/***********************************************************************/ -/* Escape and Concatenate a string to the Serialize string. 
*/ -/***********************************************************************/ -bool JOUTFILE::Escape(const char *s) -{ - // This is temporary - fputc('"', Stream); - - for (unsigned int i = 0; s[i]; i++) - switch (s[i]) { - case '"': fputs("\\\"", Stream); break; - case '\\': fputs("\\\\", Stream); break; - case '\t': fputs("\\t", Stream); break; - case '\n': fputs("\\n", Stream); break; - case '\r': fputs("\\r", Stream); break; - case '\b': fputs("\\b", Stream); break; - case '\f': fputs("\\f", Stream); break; - default: - fputc(s[i], Stream); - break; - } // endswitch s[i] - - fputc('"', Stream); - return false; -} // end of Escape - -/* ------------------------- Class JOUTPRT --------------------------- */ - -/***********************************************************************/ -/* Write a string to the Serialize pretty file. */ -/***********************************************************************/ -bool JOUTPRT::WriteStr(const char *s) -{ - // This is temporary - if (B) { - fputs(EL, Stream); - M--; - - for (int i = 0; i < M; i++) - fputc('\t', Stream); - - B = false; - } // endif B - - fputs(s, Stream); - return false; -} // end of WriteStr - -/***********************************************************************/ -/* Write a character to the Serialize pretty file. */ -/***********************************************************************/ -bool JOUTPRT::WriteChr(const char c) -{ - switch (c) { - case ':': - fputs(": ", Stream); - break; - case '{': - case '[': -#if 0 - if (M) - fputs(EL, Stream); - - for (int i = 0; i < M; i++) - fputc('\t', Stream); -#endif // 0 - - fputc(c, Stream); - fputs(EL, Stream); - M++; - - for (int i = 0; i < M; i++) - fputc('\t', Stream); - - break; - case '}': - case ']': - M--; - fputs(EL, Stream); - - for (int i = 0; i < M; i++) - fputc('\t', Stream); - - fputc(c, Stream); - B = true; - break; - case ',': - fputc(c, Stream); - fputs(EL, Stream); - - for (int i = 0; i < M; i++) - fputc('\t', Stream); - - B = false; - break; - default: - fputc(c, Stream); - } // endswitch c - -return false; -} // end of WriteChr - /* -------------------------- Class JOBJECT -------------------------- */ /***********************************************************************/ /* Return the number of pairs in this object. */ /***********************************************************************/ -int JOBJECT::GetSize(bool b) -{ - if (b) { - // Return only non null pairs - int n = 0; +int JOBJECT::GetSize(bool b) { + int n = 0; - for (PJPR jpp = First; jpp; jpp = jpp->Next) - if (jpp->Val && !jpp->Val->IsNull()) - n++; + for (PJPR jpp = First; jpp; jpp = jpp->Next) + // If b return only non null pairs + if (!b || jpp->Val && !jpp->Val->IsNull()) + n++; - return n; - } else - return Size; - -} // end of GetSize + return n; +} // end of GetSize /***********************************************************************/ /* Add a new pair to an Object. 
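JOBJECT::GetSize above folds the two previous paths into one walk; with the Size counter gone from objects, counting is now O(n) in the number of pairs and the non-null filter is applied inline. The condition relies on && binding tighter than ||, which is correct, but worth parenthesizing if it is ever touched again:

    // Equivalent form with explicit grouping (also silences
    // -Wparentheses on gcc):
    if (!b || (jpp->Val && !jpp->Val->IsNull()))
      n++;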
*/ /***********************************************************************/ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) { - PJPR jpp = new(g) JPAIR(key); + PJPR jpp = (PJPR)PlugSubAlloc(g, NULL, sizeof(JPAIR)); + + jpp->Key = key; + jpp->Next = NULL; + jpp->Val = NULL; if (Last) Last->Next = jpp; @@ -973,7 +1024,6 @@ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) First = jpp; Last = jpp; - Size++; return jpp; } // end of AddPair @@ -982,13 +1032,13 @@ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) /***********************************************************************/ PJAR JOBJECT::GetKeyList(PGLOBAL g) { - PJAR jarp = new(g) JARRAY(); + PJAR jarp = new(g) JARRAY(); - for (PJPR jpp = First; jpp; jpp = jpp->Next) - jarp->AddValue(g, new(g) JVALUE(g, jpp->GetKey())); + for (PJPR jpp = First; jpp; jpp = jpp->Next) + jarp->AddArrayValue(g, new(g) JVALUE(g, jpp->Key)); - jarp->InitArray(g); - return jarp; + jarp->InitArray(g); + return jarp; } // end of GetKeyList /***********************************************************************/ @@ -996,19 +1046,19 @@ PJAR JOBJECT::GetKeyList(PGLOBAL g) /***********************************************************************/ PJAR JOBJECT::GetValList(PGLOBAL g) { - PJAR jarp = new(g) JARRAY(); + PJAR jarp = new(g) JARRAY(); - for (PJPR jpp = First; jpp; jpp = jpp->Next) - jarp->AddValue(g, jpp->GetVal()); + for (PJPR jpp = First; jpp; jpp = jpp->Next) + jarp->AddArrayValue(g, jpp->Val); - jarp->InitArray(g); - return jarp; + jarp->InitArray(g); + return jarp; } // end of GetValList /***********************************************************************/ /* Get the value corresponding to the given key. */ /***********************************************************************/ -PJVAL JOBJECT::GetValue(const char* key) +PJVAL JOBJECT::GetKeyValue(const char* key) { for (PJPR jp = First; jp; jp = jp->Next) if (!strcmp(jp->Key, key)) @@ -1020,43 +1070,57 @@ PJVAL JOBJECT::GetValue(const char* key) /***********************************************************************/ /* Return the text corresponding to all keys (XML like). */ /***********************************************************************/ -PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) +PSZ JOBJECT::GetText(PGLOBAL g, PSTRG text) { - int n; + if (First) { + bool b; - if (!text) { - text = (char*)PlugSubAlloc(g, NULL, 0); - text[0] = 0; - n = 1; - } else - n = 0; + if (!text) { + text = new(g) STRING(g, 256); + b = true; + } else { + if (text->GetLastChar() != ' ') + text->Append(' '); - if (!First && n) - return NULL; - else if (n == 1 && Size == 1 && !strcmp(First->GetKey(), "$date")) { - int i; + b = false; + } // endif text - First->Val->GetText(g, text); - i = (text[1] == '-' ? 2 : 1); + if (b && !First->Next && !strcmp(First->Key, "$date")) { + int i; + PSZ s; - if (IsNum(text + i)) { - // Date is in milliseconds - int j = (int)strlen(text); + First->Val->GetText(g, text); + s = text->GetStr(); + i = (s[1] == '-' ? 
2 : 1); - if (j >= 4 + i) - text[j - 3] = 0; // Change it to seconds - else - strcpy(text, " 0"); + if (IsNum(s + i)) { + // Date is in milliseconds + int j = text->GetLength(); - } // endif text + if (j >= 4 + i) { + s[j - 3] = 0; // Change it to seconds + text->SetLength((uint)strlen(s)); + } else + text->Set(" 0"); + + } // endif text + + } else for (PJPR jp = First; jp; jp = jp->Next) { + jp->Val->GetText(g, text); + + if (jp->Next) + text->Append(' '); - } else for (PJPR jp = First; jp; jp = jp->Next) - jp->Val->GetText(g, text); + } // endfor jp - if (n) - PlugSubAlloc(g, NULL, strlen(text) + 1); + if (b) { + text->Trim(); + return text->GetStr(); + } // endif b - return text + n; + } // endif First + + return NULL; } // end of GetText; /***********************************************************************/ @@ -1064,25 +1128,25 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) /***********************************************************************/ bool JOBJECT::Merge(PGLOBAL g, PJSON jsp) { - if (jsp->GetType() != TYPE_JOB) { - strcpy(g->Message, "Second argument is not an object"); - return true; - } // endif Type + if (jsp->GetType() != TYPE_JOB) { + strcpy(g->Message, "Second argument is not an object"); + return true; + } // endif Type - PJOB jobp = (PJOB)jsp; + PJOB jobp = (PJOB)jsp; - for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next) - SetValue(g, jpp->GetVal(), jpp->GetKey()); + for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next) + SetKeyValue(g, jpp->Val, jpp->Key); - return false; + return false; } // end of Marge; /***********************************************************************/ /* Set or add a value corresponding to the given key. */ /***********************************************************************/ -void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) +void JOBJECT::SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key) { - PJPR jp; + PJPR jp; for (jp = First; jp; jp = jp->Next) if (!strcmp(jp->Key, key)) { @@ -1102,15 +1166,14 @@ void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) /***********************************************************************/ void JOBJECT::DeleteKey(PCSZ key) { - PJPR jp, *pjp = &First; + PJPR jp, *pjp = &First; - for (jp = First; jp; jp = jp->Next) - if (!strcmp(jp->Key, key)) { - *pjp = jp->Next; - Size--; - break; - } else - pjp = &jp->Next; + for (jp = First; jp; jp = jp->Next) + if (!strcmp(jp->Key, key)) { + *pjp = jp->Next; + break; + } else + pjp = &jp->Next; } // end of DeleteKey @@ -1129,23 +1192,35 @@ bool JOBJECT::IsNull(void) /* -------------------------- Class JARRAY --------------------------- */ /***********************************************************************/ +/* JARRAY constructor. */ +/***********************************************************************/ +JARRAY::JARRAY(void) : JSON() +{ + Type = TYPE_JAR; + Size = 0; + Alloc = 0; + First = Last = NULL; + Mvals = NULL; +} // end of JARRAY constructor + +/***********************************************************************/ /* Return the number of values in this object. 
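Pairs are plain arena nodes now: AddPair above takes raw memory from PlugSubAlloc (no constructor runs, nothing is freed individually), so every field must be set explicitly, and DeleteKey can only unlink, never reclaim. The unlink uses the classic pointer-to-pointer walk, which needs no special case for the head of the list:

    PJPR jp, *pjp= &First;          // pjp: the link that leads to jp

    for (jp= First; jp; jp= jp->Next)
      if (!strcmp(jp->Key, key)) {
        *pjp= jp->Next;             // one assignment splices jp out
        break;
      } else
        pjp= &jp->Next;

With the object's Size member gone, DeleteKey also has no counter left to maintain, which is all the removed Size-- was doing.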
*/ /***********************************************************************/ int JARRAY::GetSize(bool b) { - if (b) { - // Return only non null values - int n = 0; + if (b) { + // Return only non null values + int n = 0; - for (PJVAL jvp = First; jvp; jvp = jvp->Next) - if (!jvp->IsNull()) - n++; + for (PJVAL jvp = First; jvp; jvp = jvp->Next) + if (!jvp->IsNull()) + n++; - return n; - } else - return Size; + return n; + } else + return Size; -} // end of GetSize +} // end of GetSize /***********************************************************************/ /* Make the array of values from the values list. */ @@ -1166,19 +1241,19 @@ void JARRAY::InitArray(PGLOBAL g) } // endif Size for (i = 0, jvp = First; jvp; jvp = jvp->Next) - if (!jvp->Del) { - Mvals[i++] = jvp; - pjvp = &jvp->Next; - Last = jvp; - } else - *pjvp = jvp->Next; + if (!jvp->Del) { + Mvals[i++] = jvp; + pjvp = &jvp->Next; + Last = jvp; + } else + *pjvp = jvp->Next; } // end of InitArray /***********************************************************************/ /* Get the Nth value of an Array. */ /***********************************************************************/ -PJVAL JARRAY::GetValue(int i) +PJVAL JARRAY::GetArrayValue(int i) { if (Mvals && i >= 0 && i < Size) return Mvals[i]; @@ -1189,33 +1264,33 @@ PJVAL JARRAY::GetValue(int i) /***********************************************************************/ /* Add a Value to the Array Value list. */ /***********************************************************************/ -PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x) +PJVAL JARRAY::AddArrayValue(PGLOBAL g, PJVAL jvp, int *x) { if (!jvp) jvp = new(g) JVALUE; - if (x) { - int i = 0, n = *x; - PJVAL jp, *jpp = &First; + if (x) { + int i = 0, n = *x; + PJVAL jp, *jpp = &First; - for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next)); + for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next)); - (*jpp) = jvp; + (*jpp) = jvp; - if (!(jvp->Next = jp)) - Last = jvp; + if (!(jvp->Next = jp)) + Last = jvp; - } else { - if (!First) - First = jvp; - else if (Last == First) - First->Next = Last = jvp; - else - Last->Next = jvp; + } else { + if (!First) + First = jvp; + else if (Last == First) + First->Next = Last = jvp; + else + Last->Next = jvp; - Last = jvp; - Last->Next = NULL; - } // endif x + Last = jvp; + Last->Next = NULL; + } // endif x return jvp; } // end of AddValue @@ -1225,24 +1300,24 @@ PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x) /***********************************************************************/ bool JARRAY::Merge(PGLOBAL g, PJSON jsp) { - if (jsp->GetType() != TYPE_JAR) { - strcpy(g->Message, "Second argument is not an array"); - return true; - } // endif Type + if (jsp->GetType() != TYPE_JAR) { + strcpy(g->Message, "Second argument is not an array"); + return true; + } // endif Type - PJAR arp = (PJAR)jsp; + PJAR arp = (PJAR)jsp; - for (int i = 0; i < jsp->size(); i++) - AddValue(g, arp->GetValue(i)); + for (int i = 0; i < arp->size(); i++) + AddArrayValue(g, arp->GetArrayValue(i)); - InitArray(g); - return false; + InitArray(g); + return false; } // end of Merge /***********************************************************************/ /* Set the nth Value of the Array Value list. 
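JARRAY keeps a linked list (First/Last) as the build structure and a flat index (Mvals) for positional access, and InitArray is the synchronization point between the two: it counts live nodes, reallocates Mvals when the count outgrows Alloc, and splices out nodes marked Del while filling the index. Positional operations therefore only see changes after InitArray runs:

    int pos= 1;
    jarp->AddArrayValue(g, jvp);         // append at the tail
    jarp->AddArrayValue(g, jvp2, &pos);  // insert before position 1
    jarp->InitArray(g);                  // rebuild Mvals; GetArrayValue
                                         // now reflects both changes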
*/ /***********************************************************************/ -bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n) +bool JARRAY::SetArrayValue(PGLOBAL g, PJVAL jvp, int n) { int i = 0; PJVAL jp, *jpp = &First; @@ -1259,25 +1334,42 @@ bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n) /***********************************************************************/ /* Return the text corresponding to all values. */ /***********************************************************************/ -PSZ JARRAY::GetText(PGLOBAL g, PSZ text) +PSZ JARRAY::GetText(PGLOBAL g, PSTRG text) { - int n; - PJVAL jp; + if (First) { + bool b; + PJVAL jp; + + if (!text) { + text = new(g) STRING(g, 256); + b = true; + } else { + if (text->GetLastChar() != ' ') + text->Append(" ("); + else + text->Append('('); + + b = false; + } + + for (jp = First; jp; jp = jp->Next) { + jp->GetText(g, text); + + if (jp->Next) + text->Append(", "); + else if (!b) + text->Append(')'); - if (!text) { - text = (char*)PlugSubAlloc(g, NULL, 0); - text[0] = 0; - n = 1; - } else - n = 0; + } // endfor jp - for (jp = First; jp; jp = jp->Next) - jp->GetText(g, text); + if (b) { + text->Trim(); + return text->GetStr(); + } // endif b - if (n) - PlugSubAlloc(g, NULL, strlen(text) + 1); + } // endif First - return text + n; + return NULL; } // end of GetText; /***********************************************************************/ @@ -1285,13 +1377,13 @@ PSZ JARRAY::GetText(PGLOBAL g, PSZ text) /***********************************************************************/ bool JARRAY::DeleteValue(int n) { - PJVAL jvp = GetValue(n); + PJVAL jvp = GetArrayValue(n); - if (jvp) { - jvp->Del = true; - return false; - } else - return true; + if (jvp) { + jvp->Del = true; + return false; + } else + return true; } // end of DeleteValue @@ -1310,32 +1402,60 @@ bool JARRAY::IsNull(void) /* -------------------------- Class JVALUE- -------------------------- */ /***********************************************************************/ -/* Constructor for a JSON. */ +/* Constructor for a JVALUE. */ /***********************************************************************/ JVALUE::JVALUE(PJSON jsp) : JSON() { - if (jsp->GetType() == TYPE_JVAL) { - Jsp = jsp->GetJsp(); - Value = jsp->GetValue(); - } else { - Jsp = jsp; - Value = NULL; - } // endif Type + if (jsp->GetType() == TYPE_JVAL) { + PJVAL jvp = (PJVAL)jsp; + +// Val = ((PJVAL)jsp)->GetVal(); + if (jvp->DataType == TYPE_JSON) { + Jsp = jvp->GetJsp(); + DataType = TYPE_JSON; + Nd = 0; + } else { + LLn = jvp->LLn; // Must be LLn on 32 bit machines + Nd = jvp->Nd; + DataType = jvp->DataType; + } // endelse Jsp + + } else { + Jsp = jsp; +// Val = NULL; + DataType = TYPE_JSON; + Nd = 0; + } // endif Type - Next = NULL; - Del = false; - Size = 1; -} // end of JVALUE constructor + Next = NULL; + Del = false; + Type = TYPE_JVAL; +} // end of JVALUE constructor +#if 0 /***********************************************************************/ -/* Constructor for a Value with a given string or numeric value. */ +/* Constructor for a JVALUE with a given string or numeric value. 
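Deletion is the same two-phase story: DeleteValue above only sets the Del flag on the node found through Mvals, and the next InitArray both drops it from the index and unlinks it from the list. A caller that deletes and then reads by position must resynchronize in between:

    jarp->DeleteValue(2);   // mark element 2; structure unchanged
    jarp->InitArray(g);     // now the element is really gone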
*/ /***********************************************************************/ -JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() +JVALUE::JVALUE(PGLOBAL g, PVL vlp) : JSON() { Jsp = NULL; - Value = AllocateValue(g, valp); + Val = vlp; Next = NULL; Del = false; + Type = TYPE_JVAL; +} // end of JVALUE constructor +#endif // 0 + +/***********************************************************************/ +/* Constructor for a JVALUE with a given string or numeric value. */ +/***********************************************************************/ +JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() { + Jsp = NULL; +//Val = NULL; + SetValue(g, valp); + Next = NULL; + Del = false; + Type = TYPE_JVAL; } // end of JVALUE constructor /***********************************************************************/ @@ -1343,23 +1463,40 @@ JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() /***********************************************************************/ JVALUE::JVALUE(PGLOBAL g, PCSZ strp) : JSON() { - Jsp = NULL; - Value = AllocateValue(g, (void*)strp, TYPE_STRING); - Next = NULL; - Del = false; + Jsp = NULL; +//Val = AllocVal(g, TYPE_STRG); + Strp = (char*)strp; + DataType = TYPE_STRG; + Nd = 0; + Next = NULL; + Del = false; + Type = TYPE_JVAL; } // end of JVALUE constructor /***********************************************************************/ +/* Set or reset all Jvalue members. */ +/***********************************************************************/ +void JVALUE::Clear(void) +{ + Jsp = NULL; + Next = NULL; + Type = TYPE_JVAL; + Del = false; + Nd = 0; + DataType = TYPE_NULL; +} // end of Clear + +/***********************************************************************/ /* Returns the type of the Value's value. */ /***********************************************************************/ JTYP JVALUE::GetValType(void) { - if (Jsp) + if (DataType == TYPE_JSON) return Jsp->GetType(); - else if (Value) - return (JTYP)Value->GetType(); +//else if (Val) +// return Val->Type; else - return TYPE_NULL; + return DataType; } // end of GetValType @@ -1368,7 +1505,7 @@ JTYP JVALUE::GetValType(void) /***********************************************************************/ PJOB JVALUE::GetObject(void) { - if (Jsp && Jsp->GetType() == TYPE_JOB) + if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JOB) return (PJOB)Jsp; return NULL; @@ -1379,18 +1516,46 @@ PJOB JVALUE::GetObject(void) /***********************************************************************/ PJAR JVALUE::GetArray(void) { - if (Jsp && Jsp->GetType() == TYPE_JAR) + if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JAR) return (PJAR)Jsp; return NULL; } // end of GetArray /***********************************************************************/ -/* Return the Value's Integer value. */ +/* Return the Value's as a Value class. */ /***********************************************************************/ -int JVALUE::GetInteger(void) +PVAL JVALUE::GetValue(PGLOBAL g) { - return (Value) ? Value->GetIntValue() : 0; + PVAL valp = NULL; + + if (DataType != TYPE_JSON) + if (DataType == TYPE_STRG) + valp = AllocateValue(g, Strp, DataType, Nd); + else + valp = AllocateValue(g, &LLn, DataType, Nd); + + return valp; +} // end of GetValue + +/***********************************************************************/ +/* Return the Value's Integer value. 
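The copy in the JVALUE(PJSON) constructor in the previous hunk explains its own subtlety: the scalar members overlay each other, so copying LLn, the widest 8-byte member, copies whichever member is actually live (per the hunk's comment this matters on 32-bit machines, where copying N or the Strp pointer would move only 4 bytes). Condensed, under the union layout sketched earlier:

    LLn= jvp->LLn;            // copies N, F, B or Strp as a side effect
    Nd=  jvp->Nd;
    DataType= jvp->DataType;  // the tag travels with the payload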
*/ +/***********************************************************************/ +int JVALUE::GetInteger(void) { + int n; + + switch (DataType) { + case TYPE_INTG: n = N; break; + case TYPE_DBL: n = (int)F; break; + case TYPE_DTM: + case TYPE_STRG: n = atoi(Strp); break; + case TYPE_BOOL: n = (B) ? 1 : 0; break; + case TYPE_BINT: n = (int)LLn; break; + default: + n = 0; + } // endswitch Type + + return n; } // end of GetInteger /***********************************************************************/ @@ -1398,7 +1563,20 @@ int JVALUE::GetInteger(void) /***********************************************************************/ long long JVALUE::GetBigint(void) { - return (Value) ? Value->GetBigintValue() : 0; + long long lln; + + switch (DataType) { + case TYPE_BINT: lln = LLn; break; + case TYPE_INTG: lln = (long long)N; break; + case TYPE_DBL: lln = (long long)F; break; + case TYPE_DTM: + case TYPE_STRG: lln = atoll(Strp); break; + case TYPE_BOOL: lln = (B) ? 1 : 0; break; + default: + lln = 0; + } // endswitch Type + + return lln; } // end of GetBigint /***********************************************************************/ @@ -1406,75 +1584,157 @@ long long JVALUE::GetBigint(void) /***********************************************************************/ double JVALUE::GetFloat(void) { - return (Value) ? Value->GetFloatValue() : 0.0; + double d; + + switch (DataType) { + case TYPE_DBL: d = F; break; + case TYPE_BINT: d = (double)LLn; break; + case TYPE_INTG: d = (double)N; break; + case TYPE_DTM: + case TYPE_STRG: d = atof(Strp); break; + case TYPE_BOOL: d = (B) ? 1.0 : 0.0; break; + default: + d = 0.0; + } // endswitch Type + + return d; } // end of GetFloat /***********************************************************************/ /* Return the Value's String value. */ /***********************************************************************/ -PSZ JVALUE::GetString(PGLOBAL g) +PSZ JVALUE::GetString(PGLOBAL g, char *buff) { - char *p; - - if (Value) { - char buf[32]; - - if ((p = Value->GetCharString(buf)) == buf) - p = PlugDup(g, buf); - - } else - p = NULL; - - return p; + char buf[32]; + char *p = (buff) ? buff : buf; + + switch (DataType) { + case TYPE_DTM: + case TYPE_STRG: + p = Strp; + break; + case TYPE_INTG: + sprintf(p, "%d", N); + break; + case TYPE_BINT: + sprintf(p, "%lld", LLn); + break; + case TYPE_DBL: + sprintf(p, "%.*lf", Nd, F); + break; + case TYPE_BOOL: + p = (char*)((B) ? "true" : "false"); + break; + case TYPE_NULL: + p = (char*)"null"; + break; + default: + p = NULL; + } // endswitch Type + + + return (p == buf) ? (char*)PlugDup(g, buf) : p; } // end of GetString /***********************************************************************/ /* Return the Value's String value. */ /***********************************************************************/ -PSZ JVALUE::GetText(PGLOBAL g, PSZ text) +PSZ JVALUE::GetText(PGLOBAL g, PSTRG text) { - if (Jsp) + if (DataType == TYPE_JSON) return Jsp->GetText(g, text); - char buf[32]; - PSZ s = (Value) ? Value->GetCharString(buf) : NULL; + char buff[32]; + PSZ s = (DataType == TYPE_NULL) ? 
NULL : GetString(g, buff); - if (s) - strcat(strcat(text, " "), s); - else if (GetJsonNull()) - strcat(strcat(text, " "), GetJsonNull()); + if (s) + text->Append(s); + else if (GetJsonNull()) + text->Append(GetJsonNull()); - return text; + return NULL; } // end of GetText void JVALUE::SetValue(PJSON jsp) { - if (jsp && jsp->GetType() == TYPE_JVAL) { - Jsp = jsp->GetJsp(); - Value = jsp->GetValue(); - } else { - Jsp = jsp; - Value = NULL; - } // endif Type + if (DataType == TYPE_JSON && jsp->GetType() == TYPE_JVAL) { + Jsp = jsp->GetJsp(); + Nd = ((PJVAL)jsp)->Nd; + DataType = ((PJVAL)jsp)->DataType; + // Val = ((PJVAL)jsp)->GetVal(); + } else { + Jsp = jsp; + DataType = TYPE_JSON; + } // endif Type + +} // end of SetValue; + +void JVALUE::SetValue(PGLOBAL g, PVAL valp) +{ +//if (!Val) +// Val = AllocVal(g, TYPE_VAL); + + if (!valp || valp->IsNull()) { + DataType = TYPE_NULL; + } else switch (valp->GetType()) { + case TYPE_DATE: + if (((DTVAL*)valp)->IsFormatted()) + Strp = PlugDup(g, valp->GetCharValue()); + else { + char buf[32]; + + Strp = PlugDup(g, valp->GetCharString(buf)); + } // endif Formatted + + DataType = TYPE_DTM; + break; + case TYPE_STRING: + Strp = PlugDup(g, valp->GetCharValue()); + DataType = TYPE_STRG; + break; + case TYPE_DOUBLE: + case TYPE_DECIM: + F = valp->GetFloatValue(); + + if (IsTypeNum(valp->GetType())) + Nd = valp->GetValPrec(); + + DataType = TYPE_DBL; + break; + case TYPE_TINY: + B = valp->GetTinyValue() != 0; + DataType = TYPE_BOOL; + case TYPE_INT: + N = valp->GetIntValue(); + DataType = TYPE_INTG; + break; + case TYPE_BIGINT: + LLn = valp->GetBigintValue(); + DataType = TYPE_BINT; + break; + default: + sprintf(g->Message, "Unsupported typ %d\n", valp->GetType()); + throw(777); + } // endswitch Type -} // end of SetValue; +} // end of SetValue /***********************************************************************/ /* Set the Value's value as the given integer. */ /***********************************************************************/ void JVALUE::SetInteger(PGLOBAL g, int n) { - Value = AllocateValue(g, &n, TYPE_INT); - Jsp = NULL; + N = n; + DataType = TYPE_INTG; } // end of SetInteger /***********************************************************************/ /* Set the Value's Boolean value as a tiny integer. */ /***********************************************************************/ -void JVALUE::SetTiny(PGLOBAL g, char n) +void JVALUE::SetBool(PGLOBAL g, bool b) { - Value = AllocateValue(g, &n, TYPE_TINY); - Jsp = NULL; + B = b; + DataType = TYPE_BOOL; } // end of SetTiny /***********************************************************************/ @@ -1482,8 +1742,8 @@ void JVALUE::SetTiny(PGLOBAL g, char n) /***********************************************************************/ void JVALUE::SetBigint(PGLOBAL g, long long ll) { - Value = AllocateValue(g, &ll, TYPE_BIGINT); - Jsp = NULL; + LLn = ll; + DataType = TYPE_BINT; } // end of SetBigint /***********************************************************************/ @@ -1491,17 +1751,19 @@ void JVALUE::SetBigint(PGLOBAL g, long long ll) /***********************************************************************/ void JVALUE::SetFloat(PGLOBAL g, double f) { - Value = AllocateValue(g, &f, TYPE_DOUBLE, 6); - Jsp = NULL; + F = f; + Nd = 6; + DataType = TYPE_DBL; } // end of SetFloat /***********************************************************************/ /* Set the Value's value as the given string. 
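
SetValue(PGLOBAL, PVAL) above is the inbound mapping: each database type selects a union slot and stamps DataType, and floating values also record their decimal count in Nd so later formatting can reproduce it. A hedged sketch of that setter shape (illustrative types; the real method also handles dates, big integers and booleans):

#include <cstdio>

enum Tag { T_NULL, T_STRG, T_INTG, T_DBL };

struct TaggedVal {
  union { const char *Strp; int N; double F; };
  int Nd = 0;                // decimals to keep for doubles
  Tag DataType = T_NULL;

  void SetString(const char *s)   { Strp = s; DataType = T_STRG; }
  void SetInteger(int n)          { N = n;    DataType = T_INTG; }
  void SetFloat(double f, int nd) { F = f; Nd = nd; DataType = T_DBL; }

  void Print() const {
    switch (DataType) {
      case T_STRG: printf("\"%s\"\n", Strp);      break;
      case T_INTG: printf("%d\n", N);             break;
      case T_DBL:  printf("%.*lf\n", Nd, F);      break;  // Nd decimals,
      default:     printf("null\n");                      // as %.*lf above
    }
  }
};

int main() {
  TaggedVal v;
  v.SetFloat(3.14159, 2);
  v.Print();                 // prints: 3.14
}
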
*/ /***********************************************************************/ -void JVALUE::SetString(PGLOBAL g, PSZ s, short c) +void JVALUE::SetString(PGLOBAL g, PSZ s, int ci) { - Value = AllocateValue(g, s, TYPE_STRING, c); - Jsp = NULL; + Strp = s; + Nd = ci; + DataType = TYPE_STRG; } // end of SetString /***********************************************************************/ @@ -1509,6 +1771,239 @@ void JVALUE::SetString(PGLOBAL g, PSZ s, short c) /***********************************************************************/ bool JVALUE::IsNull(void) { - return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsNull() : true; + return (DataType == TYPE_JSON) ? Jsp->IsNull() : DataType == TYPE_NULL; } // end of IsNull + +/* ---------------------------- Class SWAP --------------------------- */ + +/***********************************************************************/ +/* Replace all pointers by offsets or the opposite. */ +/***********************************************************************/ +void SWAP::SwapJson(PJSON jsp, bool move) +{ + if (move) + MoffJson(jsp); + else + MptrJson((PJSON)MakeOff(Base, jsp)); + + return; +} // end of SwapJson + +/***********************************************************************/ +/* Replace all pointers by offsets. */ +/***********************************************************************/ +size_t SWAP::MoffJson(PJSON jsp) { + size_t res = 0; + + if (jsp) + switch (jsp->Type) { + case TYPE_JAR: + res = MoffArray((PJAR)jsp); + break; + case TYPE_JOB: + res = MoffObject((PJOB)jsp); + break; + case TYPE_JVAL: + res = MoffJValue((PJVAL)jsp); + break; + default: + throw "Invalid json tree"; + } // endswitch Type + + return res; +} // end of MoffJson + +/***********************************************************************/ +/* Replace all array pointers by offsets. */ +/***********************************************************************/ +size_t SWAP::MoffArray(PJAR jarp) +{ + if (jarp->First) { + for (int i = 0; i < jarp->Size; i++) + jarp->Mvals[i] = (PJVAL)MakeOff(Base, jarp->Mvals[i]); + + jarp->Mvals = (PJVAL*)MakeOff(Base, jarp->Mvals); + jarp->First = (PJVAL)MoffJValue(jarp->First); + jarp->Last = (PJVAL)MakeOff(Base, jarp->Last); + } // endif First + + return MakeOff(Base, jarp); +} // end of MoffArray + +/***********************************************************************/ +/* Replace all object pointers by offsets. */ +/***********************************************************************/ +size_t SWAP::MoffObject(PJOB jobp) { + if (jobp->First) { + jobp->First = (PJPR)MoffPair(jobp->First); + jobp->Last = (PJPR)MakeOff(Base, jobp->Last); + } // endif First + + return MakeOff(Base, jobp); +} // end of MoffObject + +/***********************************************************************/ +/* Replace all pair pointers by offsets. */ +/***********************************************************************/ +size_t SWAP::MoffPair(PJPR jpp) { + jpp->Key = (PCSZ)MakeOff(Base, (void*)jpp->Key); + + if (jpp->Val) + jpp->Val = (PJVAL)MoffJValue(jpp->Val); + + if (jpp->Next) + jpp->Next = (PJPR)MoffPair(jpp->Next); + + return MakeOff(Base, jpp); +} // end of MoffPair + +/***********************************************************************/ +/* Replace all jason value pointers by offsets. 
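
SwapJson and the Moff* helpers above make a sub-allocated tree position-independent: every pointer is replaced by its byte offset from Base, the arena can then be copied or persisted, and the Mptr* pass rebases the offsets at the new address. A self-contained sketch of that round trip on a plain linked list, with MakeOff/MakePtr reimplemented under the contract the code implies (offset = pointer minus base):

#include <cstdio>
#include <cstring>

static size_t MakeOff(void *base, void *p)  { return (char*)p - (char*)base; }
static void  *MakePtr(void *base, size_t o) { return (char*)base + o; }

struct Node { int val; Node *next; };

int main() {
  // Build a two-node list inside one relocatable arena.
  alignas(Node) char arena[64];
  Node *a = (Node*)arena, *b = (Node*)(arena + sizeof(Node));
  *a = {1, b};
  *b = {2, nullptr};

  // Moff pass: pointers become offsets; the arena is now movable.
  a->next = (Node*)MakeOff(arena, a->next);

  // Move the bytes, then Mptr pass: offsets become pointers again.
  alignas(Node) char arena2[64];
  memcpy(arena2, arena, sizeof arena);
  Node *a2 = (Node*)arena2;
  a2->next = (Node*)MakePtr(arena2, (size_t)a2->next);

  printf("%d %d\n", a2->val, a2->next->val);   // prints: 1 2
}
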
*/ +/***********************************************************************/ +size_t SWAP::MoffJValue(PJVAL jvp) { + if (!jvp->Del) { + if (jvp->DataType == TYPE_JSON) + jvp->Jsp = (PJSON)MoffJson(jvp->Jsp); + else if (jvp->DataType == TYPE_STRG) + jvp->Strp = (PSZ)MakeOff(Base, (jvp->Strp)); + +// if (jvp->Val) +// jvp->Val = (PVL)MoffVal(jvp->Val); + + } // endif Del + + if (jvp->Next) + jvp->Next = (PJVAL)MoffJValue(jvp->Next); + + return MakeOff(Base, jvp); +} // end of MoffJValue + +#if 0 +/***********************************************************************/ +/* Replace string pointers by offset. */ +/***********************************************************************/ +size_t SWAP::MoffVal(PVL vlp) { + if (vlp->Type == TYPE_STRG) + vlp->Strp = (PSZ)MakeOff(Base, (vlp->Strp)); + + return MakeOff(Base, vlp); +} // end of MoffVal +#endif // 0 + +/***********************************************************************/ +/* Replace all offsets by pointers. */ +/***********************************************************************/ +PJSON SWAP::MptrJson(PJSON ojp) { // ojp is an offset + PJSON jsp = (PJSON)MakePtr(Base, (size_t)ojp); + + if (ojp) + switch (jsp->Type) { + case TYPE_JAR: + jsp = MptrArray((PJAR)ojp); + break; + case TYPE_JOB: + jsp = MptrObject((PJOB)ojp); + break; + case TYPE_JVAL: + jsp = MptrJValue((PJVAL)ojp); + break; + default: + throw "Invalid json tree"; + } // endswitch Type + + return jsp; +} // end of MptrJson + +/***********************************************************************/ +/* Replace all array offsets by pointers. */ +/***********************************************************************/ +PJAR SWAP::MptrArray(PJAR ojar) { + PJAR jarp = (PJAR)MakePtr(Base, (size_t)ojar); + + jarp = (PJAR)new((long long)jarp) JARRAY(0); + + if (jarp->First) { + jarp->Mvals = (PJVAL*)MakePtr(Base, (size_t)jarp->Mvals); + + for (int i = 0; i < jarp->Size; i++) + jarp->Mvals[i] = (PJVAL)MakePtr(Base, (size_t)jarp->Mvals[i]); + + jarp->First = (PJVAL)MptrJValue(jarp->First); + jarp->Last = (PJVAL)MakePtr(Base, (size_t)jarp->Last); + } // endif First + + return jarp; +} // end of MptrArray + +/***********************************************************************/ +/* Replace all object offsets by pointers. */ +/***********************************************************************/ +PJOB SWAP::MptrObject(PJOB ojob) { + PJOB jobp = (PJOB)MakePtr(Base, (size_t)ojob); + + jobp = (PJOB)new((long long)jobp) JOBJECT(0); + + if (jobp->First) { + jobp->First = (PJPR)MptrPair(jobp->First); + jobp->Last = (PJPR)MakePtr(Base, (size_t)jobp->Last); + } // endif First + + return jobp; +} // end of MptrObject + +/***********************************************************************/ +/* Replace all pair offsets by pointers. */ +/***********************************************************************/ +PJPR SWAP::MptrPair(PJPR ojp) { + PJPR jpp = (PJPR)MakePtr(Base, (size_t)ojp); + + jpp->Key = (PCSZ)MakePtr(Base, (size_t)jpp->Key); + + if (jpp->Val) + jpp->Val = (PJVAL)MptrJValue(jpp->Val); + + if (jpp->Next) + jpp->Next = (PJPR)MptrPair(jpp->Next); + + return jpp; +} // end of MptrPair + +/***********************************************************************/ +/* Replace all value offsets by pointers. 
*/ +/***********************************************************************/ +PJVAL SWAP::MptrJValue(PJVAL ojv) { + PJVAL jvp = (PJVAL)MakePtr(Base, (size_t)ojv); + + jvp = (PJVAL)new((long long)jvp) JVALUE(0); + + if (!jvp->Del) { + if (jvp->DataType == TYPE_JSON) + jvp->Jsp = (PJSON)MptrJson(jvp->Jsp); + else if (jvp->DataType == TYPE_STRG) + jvp->Strp = (PSZ)MakePtr(Base, (size_t)jvp->Strp); + +// if (jvp->Val) +// jvp->Val = (PVL)MptrVal(jvp->Val); + + } // endif Del + + if (jvp->Next) + jvp->Next = (PJVAL)MptrJValue(jvp->Next); + + return jvp; +} // end of MptrJValue + +#if 0 +/***********************************************************************/ +/* Replace string offsets by a pointer. */ +/***********************************************************************/ +PVL SWAP::MptrVal(PVL ovl) { + PVL vlp = (PVL)MakePtr(Base, (size_t)ovl); + + if (vlp->Type == TYPE_STRG) + vlp->Strp = (PSZ)MakePtr(Base, (size_t)vlp->Strp); + + return vlp; +} // end of MptrValue +#endif // 0 diff --git a/storage/connect/json.h b/storage/connect/json.h index bc94b372133..3a026f5df22 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -5,8 +5,10 @@ /* */ /* This file contains the JSON classes declares. */ /***********************************************************************/ +#pragma once #include <mysql_com.h> #include "value.h" +#include "xobject.h" #if defined(_DEBUG) #define X assert(false); @@ -14,240 +16,147 @@ #define X #endif -enum JTYP {TYPE_NULL = TYPE_VOID, - TYPE_STRG = TYPE_STRING, - TYPE_DBL = TYPE_DOUBLE, - TYPE_BOOL = TYPE_TINY, - TYPE_BINT = TYPE_BIGINT, - TYPE_DTM = TYPE_DATE, - TYPE_INTG = TYPE_INT, - TYPE_VAL = 12, - TYPE_JSON, - TYPE_JAR, - TYPE_JOB, - TYPE_JVAL}; - +enum JTYP { + TYPE_NULL = TYPE_VOID, + TYPE_STRG = TYPE_STRING, + TYPE_DBL = TYPE_DOUBLE, + TYPE_BOOL = TYPE_TINY, + TYPE_BINT = TYPE_BIGINT, + TYPE_INTG = TYPE_INT, + TYPE_DTM = TYPE_DATE, + TYPE_FLOAT, + TYPE_JAR, + TYPE_JOB, + TYPE_JVAL, + TYPE_JSON, + TYPE_DEL, + TYPE_UNKNOWN +}; + +class JDOC; class JOUT; class JSON; -class JMAP; class JVALUE; class JOBJECT; class JARRAY; -typedef class JPAIR *PJPR; +typedef class JDOC *PJDOC; typedef class JSON *PJSON; typedef class JVALUE *PJVAL; typedef class JOBJECT *PJOB; typedef class JARRAY *PJAR; -typedef struct { - char *str; - int len; - } STRG, *PSG; - -// BSON size should be equal on Linux and Windows -#define BMX 255 -typedef struct BSON* PBSON; +typedef struct JPAIR *PJPR; +//typedef struct VAL *PVL; /***********************************************************************/ -/* Structure used to return binary json to Json UDF functions. */ +/* Structure JPAIR. The pairs of a json Object. 
*/ /***********************************************************************/ -struct BSON { - char Msg[BMX + 1]; - char *Filename; - PGLOBAL G; - int Pretty; - ulong Reslen; - my_bool Changed; - PJSON Top; - PJSON Jsp; - PBSON Bsp; -}; // end of struct BSON - -PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); +struct JPAIR { + PCSZ Key; // This pair key name + PJVAL Val; // To the value of the pair + PJPR Next; // To the next pair +}; // end of struct JPAIR +//PVL AllocVal(PGLOBAL g, JTYP type); char *NextChr(PSZ s, char sep); char *GetJsonNull(void); +const char* GetFmt(int type, bool un); -PJSON ParseJson(PGLOBAL g, char* s, int n, int* prty = NULL, bool* b = NULL); +PJSON ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL); PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty); -bool SerializeArray(JOUT *js, PJAR jarp, bool b); -bool SerializeObject(JOUT *js, PJOB jobp); -bool SerializeValue(JOUT *js, PJVAL jvp); -char *NextChr(PSZ s, char sep); DllExport bool IsNum(PSZ s); /***********************************************************************/ -/* Class JOUT. Used by Serialize. */ -/***********************************************************************/ -class JOUT : public BLOCK { - public: - JOUT(PGLOBAL gp) : BLOCK() {g = gp; Pretty = 3;} - - virtual bool WriteStr(const char *s) = 0; - virtual bool WriteChr(const char c) = 0; - virtual bool Escape(const char *s) = 0; - int Prty(void) {return Pretty;} - - // Member - PGLOBAL g; - int Pretty; -}; // end of class JOUT - -/***********************************************************************/ -/* Class JOUTSTR. Used to Serialize to a string. */ -/***********************************************************************/ -class JOUTSTR : public JOUT { - public: - JOUTSTR(PGLOBAL g); - - virtual bool WriteStr(const char *s); - virtual bool WriteChr(const char c); - virtual bool Escape(const char *s); - - // Member - char *Strp; // The serialized string - size_t N; // Position of next char - size_t Max; // String max size -}; // end of class JOUTSTR - -/***********************************************************************/ -/* Class JOUTFILE. Used to Serialize to a file. */ -/***********************************************************************/ -class JOUTFILE : public JOUT { - public: - JOUTFILE(PGLOBAL g, FILE *str, int pty) : JOUT(g) {Stream = str; Pretty = pty;} - - virtual bool WriteStr(const char *s); - virtual bool WriteChr(const char c); - virtual bool Escape(const char *s); - - // Member - FILE *Stream; -}; // end of class JOUTFILE - -/***********************************************************************/ -/* Class JOUTPRT. Used to Serialize to a pretty file. */ -/***********************************************************************/ -class JOUTPRT : public JOUTFILE { - public: - JOUTPRT(PGLOBAL g, FILE *str) : JOUTFILE(g, str, 2) {M = 0; B = false;} - - virtual bool WriteStr(const char *s); - virtual bool WriteChr(const char c); - - // Member - int M; - bool B; -}; // end of class JOUTPRT - -/***********************************************************************/ -/* Class PAIR. The pairs of a json Object. */ +/* Class JDOC. The class for parsing and serializing json documents. 
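
With parsing and serializing now folded into JDOC, the free functions declared above remain the public entry points. A hedged usage sketch built only from those declarations (PlugInit and PlugExit are the CONNECT work-area routines used elsewhere in this patch; the area size and error handling are illustrative):

#include <cstdio>
#include <cstring>

// Sketch only: assumes the json.h declarations above are in scope.
void RoundTrip(void)
{
  PGLOBAL g = PlugInit(NULL, 1 << 20);           // allocate a work area
  char src[] = "{\"a\":[1,2,3]}";
  PJSON jsp = ParseJson(g, src, strlen(src));    // prty/comma args defaulted
  if (jsp)
    printf("%s\n", Serialize(g, jsp, NULL, 0));  // NULL file name: to a string
  g = PlugExit(g);                               // free the work area
}
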
*/ /***********************************************************************/ -class JPAIR : public BLOCK { - friend class JOBJECT; - friend class JSNX; - friend class JSON; - friend bool SerializeObject(JOUT *, PJOB); - public: - JPAIR(PCSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;} - - inline PCSZ GetKey(void) {return Key;} - inline PJVAL GetVal(void) {return Val;} - inline PJPR GetNext(void) {return Next;} +class JDOC: public BLOCK { + friend PJSON ParseJson(PGLOBAL, char*, size_t, int*, bool*); + friend PSZ Serialize(PGLOBAL, PJSON, char*, int); +public: + JDOC(void) : js(NULL), s(NULL), len(0), pty(NULL) {} - protected: - PCSZ Key; // This pair key name - PJVAL Val; // To the value of the pair - PJPR Next; // To the next pair -}; // end of class JPAIR - -/***********************************************************************/ -/* Class JSON. The base class for all other json classes. */ -/***********************************************************************/ -class JSON : public BLOCK { - friend PJSON ParseJson(PGLOBAL, char*, int, int*, bool*); - public: - JSON(void) : s(NULL), len(0), pty(NULL) {Size = 0;} - - int size(void) {return Size;} - virtual int GetSize(bool b) {return Size;} - virtual void Clear(void) {Size = 0;} - virtual JTYP GetType(void) {return TYPE_JSON;} - virtual JTYP GetValType(void) {X return TYPE_JSON;} - virtual void InitArray(PGLOBAL g) {X} -//virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;} - virtual PJPR AddPair(PGLOBAL g, PCSZ key) {X return NULL;} - virtual PJAR GetKeyList(PGLOBAL g) {X return NULL;} - virtual PJAR GetValList(PGLOBAL g) {X return NULL;} - virtual PJVAL GetValue(const char *key) {X return NULL;} - virtual PJOB GetObject(void) {return NULL;} - virtual PJAR GetArray(void) {return NULL;} - virtual PJVAL GetValue(int i) {X return NULL;} - virtual PVAL GetValue(void) {X return NULL;} - virtual PJSON GetJsp(void) { X return NULL; } - virtual PJSON GetJson(void) { X return NULL; } - virtual PJPR GetFirst(void) {X return NULL;} - virtual int GetInteger(void) {X return 0;} - virtual double GetFloat() {X return 0.0;} - virtual PSZ GetString(PGLOBAL g) {X return NULL;} - virtual PSZ GetText(PGLOBAL g, PSZ text) {X return NULL;} - virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } - virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; } - virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) {X} - virtual void SetValue(PVAL valp) {X} - virtual void SetValue(PJSON jsp) {X} - virtual void SetString(PGLOBAL g, PSZ s, short c) {X} - virtual void SetInteger(PGLOBAL g, int n) {X} - virtual void SetFloat(PGLOBAL g, double f) {X} - virtual void DeleteKey(PCSZ k) {X} - virtual bool DeleteValue(int i) {X return true;} - virtual bool IsNull(void) {X return true;} + void SetJp(JOUT* jp) { js = jp; } protected: PJAR ParseArray(PGLOBAL g, int& i); PJOB ParseObject(PGLOBAL g, int& i); PJVAL ParseValue(PGLOBAL g, int& i); char *ParseString(PGLOBAL g, int& i); - PVAL ParseNumeric(PGLOBAL g, int& i); + void ParseNumeric(PGLOBAL g, int& i, PJVAL jvp); PJAR ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp); + bool SerializeArray(PJAR jarp, bool b); + bool SerializeObject(PJOB jobp); + bool SerializeValue(PJVAL jvp); - // Members - int Size; - - // Only used when parsing + // Members used when parsing and serializing private: + JOUT* js; char *s; int len; bool *pty; +}; // end of class JDOC + +/***********************************************************************/ +/* Class JSON. 
The base class for all other json classes. */ +/***********************************************************************/ +class JSON : public BLOCK { +public: + // Constructor + JSON(void) { Type = TYPE_JSON; } + JSON(int) {} + + // Implementation + inline JTYP GetType(void) { return Type; } + + // Methods + virtual int size(void) { return 1; } + virtual void Clear(void) { X } + virtual PJOB GetObject(void) { return NULL; } + virtual PJAR GetArray(void) { return NULL; } + virtual PJVAL GetArrayValue(int i) { X return NULL; } + virtual int GetSize(bool b) { X return 0; } + virtual PJSON GetJsp(void) { X return NULL; } + virtual PJPR GetFirst(void) { X return NULL; } + virtual PSZ GetText(PGLOBAL g, PSTRG text) { X return NULL; } + virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } + virtual void SetValue(PJSON jsp) { X } + virtual bool DeleteValue(int i) { X return true; } + virtual bool IsNull(void) { X return true; } + + // Members + JTYP Type; }; // end of class JSON /***********************************************************************/ /* Class JOBJECT: contains a list of value pairs. */ /***********************************************************************/ class JOBJECT : public JSON { - friend bool SerializeObject(JOUT *, PJOB); + friend class JDOC; friend class JSNX; - public: - JOBJECT(void) : JSON() {First = Last = NULL;} - - using JSON::GetValue; - using JSON::SetValue; - virtual void Clear(void) {First = Last = NULL; Size = 0;} - virtual JTYP GetType(void) {return TYPE_JOB;} + friend class SWAP; +public: + JOBJECT(void) : JSON() { Type = TYPE_JOB; First = Last = NULL; } + JOBJECT(int i) : JSON(i) {} + + // Methods + virtual void Clear(void) {First = Last = NULL;} +//virtual JTYP GetValType(void) {return TYPE_JOB;} virtual PJPR GetFirst(void) {return First;} virtual int GetSize(bool b); - virtual PJPR AddPair(PGLOBAL g, PCSZ key); virtual PJOB GetObject(void) {return this;} - virtual PJVAL GetValue(const char* key); - virtual PJAR GetKeyList(PGLOBAL g); - virtual PJAR GetValList(PGLOBAL g); - virtual PSZ GetText(PGLOBAL g, PSZ text); + virtual PSZ GetText(PGLOBAL g, PSTRG text); virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key); - virtual void DeleteKey(PCSZ k); virtual bool IsNull(void); + // Specific + PJPR AddPair(PGLOBAL g, PCSZ key); + PJVAL GetKeyValue(const char* key); + PJAR GetKeyList(PGLOBAL g); + PJAR GetValList(PGLOBAL g); + void SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key); + void DeleteKey(PCSZ k); + protected: PJPR First; PJPR Last; @@ -257,27 +166,30 @@ class JOBJECT : public JSON { /* Class JARRAY. 
*/ /***********************************************************************/ class JARRAY : public JSON { - friend PJAR ParseArray(PGLOBAL, int&, STRG&, bool*); + friend class SWAP; public: - JARRAY(void) : JSON() {Alloc = 0; First = Last = NULL; Mvals = NULL;} + JARRAY(void); + JARRAY(int i) : JSON(i) {} - using JSON::GetValue; - using JSON::SetValue; + // Methods virtual void Clear(void) {First = Last = NULL; Size = 0;} - virtual JTYP GetType(void) {return TYPE_JAR;} + virtual int size(void) { return Size; } virtual PJAR GetArray(void) {return this;} virtual int GetSize(bool b); - PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL); - virtual void InitArray(PGLOBAL g); - virtual PJVAL GetValue(int i); - virtual PSZ GetText(PGLOBAL g, PSZ text); + virtual PJVAL GetArrayValue(int i); + virtual PSZ GetText(PGLOBAL g, PSTRG text); virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i); virtual bool DeleteValue(int n); virtual bool IsNull(void); + // Specific + PJVAL AddArrayValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL); + bool SetArrayValue(PGLOBAL g, PJVAL jvp, int i); + void InitArray(PGLOBAL g); + protected: // Members + int Size; // The number of items in the array int Alloc; // The Mvals allocated size PJVAL First; // Used when constructing PJVAL Last; // Last constructed value @@ -290,43 +202,161 @@ class JARRAY : public JSON { class JVALUE : public JSON { friend class JARRAY; friend class JSNX; + friend class JSONDISC; friend class JSONCOL; friend class JSON; - friend bool SerializeValue(JOUT*, PJVAL); - public: - JVALUE(void) : JSON() {Clear();} + friend class JDOC; + friend class SWAP; +public: + JVALUE(void) : JSON() { Type = TYPE_JVAL; Clear(); } JVALUE(PJSON jsp); +//JVALUE(PGLOBAL g, PVL vlp); JVALUE(PGLOBAL g, PVAL valp); JVALUE(PGLOBAL g, PCSZ strp); + JVALUE(int i) : JSON(i) {} + + //using JSON::GetVal; + //using JSON::SetVal; - using JSON::GetValue; - using JSON::SetValue; - virtual void Clear(void) - {Jsp = NULL; Value = NULL; Next = NULL; Del = false; Size = 1;} - virtual JTYP GetType(void) {return TYPE_JVAL;} + // Methods + virtual void Clear(void); +//virtual JTYP GetType(void) {return TYPE_JVAL;} virtual JTYP GetValType(void); virtual PJOB GetObject(void); virtual PJAR GetArray(void); - virtual PVAL GetValue(void) {return Value;} - virtual PJSON GetJsp(void) {return Jsp;} - virtual PJSON GetJson(void) { return (Jsp ? Jsp : this); } - virtual int GetInteger(void); - virtual long long GetBigint(void); - virtual double GetFloat(void); - virtual PSZ GetString(PGLOBAL g); - virtual PSZ GetText(PGLOBAL g, PSZ text); - virtual void SetValue(PJSON jsp); - virtual void SetValue(PVAL valp) { Value = valp; Jsp = NULL; } - virtual void SetString(PGLOBAL g, PSZ s, short c = 0); - virtual void SetInteger(PGLOBAL g, int n); - virtual void SetBigint(PGLOBAL g, longlong ll); - virtual void SetFloat(PGLOBAL g, double f); - virtual void SetTiny(PGLOBAL g, char f); + virtual PJSON GetJsp(void) {return (DataType == TYPE_JSON ? Jsp : NULL);} + virtual PSZ GetText(PGLOBAL g, PSTRG text); virtual bool IsNull(void); + // Specific + //inline PVL GetVal(void) { return Val; } + //inline void SetVal(PVL vlp) { Val = vlp; } + inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? 
Jsp : this); } + PSZ GetString(PGLOBAL g, char* buff = NULL); + int GetInteger(void); + long long GetBigint(void); + double GetFloat(void); + PVAL GetValue(PGLOBAL g); + void SetValue(PJSON jsp); + void SetValue(PGLOBAL g, PVAL valp); + void SetString(PGLOBAL g, PSZ s, int ci = 0); + void SetInteger(PGLOBAL g, int n); + void SetBigint(PGLOBAL g, longlong ll); + void SetFloat(PGLOBAL g, double f); + void SetBool(PGLOBAL g, bool b); + protected: - PJSON Jsp; // To the json value - PVAL Value; // The numeric value - PJVAL Next; // Next value in array - bool Del; // True when deleted + union { + PJSON Jsp; // To the json value + char *Strp; // Ptr to a string + int N; // An integer value + long long LLn; // A big integer value + double F; // A (double) float value + bool B; // True or false + }; +//PVL Val; // To the string or numeric value + PJVAL Next; // Next value in array + JTYP DataType; // The data value type + int Nd; // Decimal number + bool Del; // True when deleted }; // end of class JVALUE + + +/***********************************************************************/ +/* Class JOUT. Used by Serialize. */ +/***********************************************************************/ +class JOUT : public BLOCK { +public: + JOUT(PGLOBAL gp) : BLOCK() { g = gp; Pretty = 3; } + + virtual bool WriteStr(const char* s) = 0; + virtual bool WriteChr(const char c) = 0; + virtual bool Escape(const char* s) = 0; + int Prty(void) { return Pretty; } + + // Member + PGLOBAL g; + int Pretty; +}; // end of class JOUT + +/***********************************************************************/ +/* Class JOUTSTR. Used to Serialize to a string. */ +/***********************************************************************/ +class JOUTSTR : public JOUT { +public: + JOUTSTR(PGLOBAL g); + + virtual bool WriteStr(const char* s); + virtual bool WriteChr(const char c); + virtual bool Escape(const char* s); + + // Member + char* Strp; // The serialized string + size_t N; // Position of next char + size_t Max; // String max size +}; // end of class JOUTSTR + +/***********************************************************************/ +/* Class JOUTFILE. Used to Serialize to a file. */ +/***********************************************************************/ +class JOUTFILE : public JOUT { +public: + JOUTFILE(PGLOBAL g, FILE* str, int pty) : JOUT(g) { Stream = str; Pretty = pty; } + + virtual bool WriteStr(const char* s); + virtual bool WriteChr(const char c); + virtual bool Escape(const char* s); + + // Member + FILE* Stream; +}; // end of class JOUTFILE + +/***********************************************************************/ +/* Class JOUTPRT. Used to Serialize to a pretty file. */ +/***********************************************************************/ +class JOUTPRT : public JOUTFILE { +public: + JOUTPRT(PGLOBAL g, FILE* str) : JOUTFILE(g, str, 2) { M = 0; B = false; } + + virtual bool WriteStr(const char* s); + virtual bool WriteChr(const char c); + + // Member + int M; + bool B; +}; // end of class JOUTPRT + + +/***********************************************************************/ +/* Class SWAP. Used to make or unmake a JSON tree movable. */ +/* This is done by making all pointers to offsets. 
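
The relocated JOUT hierarchy above decouples serialization from its destination: the serializer only calls WriteStr/WriteChr (which return true on failure), and the subclass decides whether output lands in memory, a file, or a pretty-printed file. A minimal independent sketch of that writer interface (std::string stands in for JOUTSTR's hand-managed buffer):

#include <cstdio>
#include <string>

struct Out {                                  // the JOUT role
  virtual bool WriteStr(const char *s) = 0;
  virtual bool WriteChr(char c) = 0;
  virtual ~Out() {}
};

struct StrOut : Out {                         // JOUTSTR: serialize to memory
  std::string buf;
  bool WriteStr(const char *s) override { buf += s; return false; }
  bool WriteChr(char c) override { buf += c; return false; }
};

struct FileOut : Out {                        // JOUTFILE: serialize to a stream
  FILE *f;
  explicit FileOut(FILE *fp) : f(fp) {}
  bool WriteStr(const char *s) override { return fputs(s, f) < 0; }
  bool WriteChr(char c) override { return fputc(c, f) == EOF; }
};

int main() {
  StrOut so;
  so.WriteChr('[');
  so.WriteStr("1, 2");
  so.WriteChr(']');
  printf("%s\n", so.buf.c_str());             // prints: [1, 2]
}
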
*/ +/***********************************************************************/ +class SWAP : public BLOCK { +public: + // Constructor + SWAP(PGLOBAL g, PJSON jsp) + { + G = g, Base = (char*)jsp - 8; + } + + // Methods + void SwapJson(PJSON jsp, bool move); + +protected: + size_t MoffJson(PJSON jnp); + size_t MoffArray(PJAR jarp); + size_t MoffObject(PJOB jobp); + size_t MoffJValue(PJVAL jvp); + size_t MoffPair(PJPR jpp); +//size_t MoffVal(PVL vlp); + PJSON MptrJson(PJSON jnp); + PJAR MptrArray(PJAR jarp); + PJOB MptrObject(PJOB jobp); + PJVAL MptrJValue(PJVAL jvp); + PJPR MptrPair(PJPR jpp); +//PVL MptrVal(PVL vlp); + + // Member + PGLOBAL G; + void *Base; +}; // end of class SWAP diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 44028a32564..044ed0772ea 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -27,12 +27,6 @@ #endif #define M 9 -bool IsNum(PSZ s); -char *NextChr(PSZ s, char sep); -char *GetJsonNull(void); -uint GetJsonGrpSize(void); -static int IsJson(UDF_ARGS *args, uint i, bool b = false); -static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i); static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error); static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, @@ -40,8 +34,10 @@ static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, static PJSON JsonNew(PGLOBAL g, JTYP type); static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp = NULL); static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len = 64); +uint GetJsonGroupSize(void); +static void SetChanged(PBSON bsp); -static uint JsonGrpSize = 10; +uint JsonGrpSize = 10; /*********************************************************************************/ /* SubAlloc a new JSNX class with protection against memory exhaustion. */ @@ -63,7 +59,7 @@ static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len) return jsx; } /* end of JsnxNew */ - /* ----------------------------------- JSNX ------------------------------------ */ +/* ----------------------------------- JSNX ------------------------------------ */ /*********************************************************************************/ /* JSNX public constructor. */ @@ -347,7 +343,7 @@ PVAL JSNX::MakeJson(PGLOBAL g, PJSON jsp) /*********************************************************************************/ /* SetValue: Set a value from a JVALUE contains. 
*/ /*********************************************************************************/ -void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) +void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val) { if (val) { vp->SetNull(false); @@ -355,11 +351,22 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) if (Jb) { vp->SetValue_psz(Serialize(g, val->GetJsp(), NULL, 0)); } else switch (val->GetValType()) { + case TYPE_DTM: case TYPE_STRG: + vp->SetValue_psz(val->GetString(g)); + break; case TYPE_INTG: + vp->SetValue(val->GetInteger()); + break; case TYPE_BINT: + vp->SetValue(val->GetBigint()); + break; case TYPE_DBL: - vp->SetValue_pval(val->GetValue()); + if (vp->IsTypeNum()) + vp->SetValue(val->GetFloat()); + else // Get the proper number of decimals + vp->SetValue_psz(val->GetString(g)); + break; case TYPE_BOOL: if (vp->IsTypeNum()) @@ -369,14 +376,11 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) break; case TYPE_JAR: - SetJsonValue(g, vp, val->GetArray()->GetValue(0), n); + vp->SetValue_psz(val->GetArray()->GetText(g, NULL)); break; case TYPE_JOB: -// if (!vp->IsTypeNum() || !Strict) { vp->SetValue_psz(val->GetObject()->GetText(g, NULL)); break; -// } // endif Type - case TYPE_NULL: vp->SetNull(true); /* falls through */ @@ -412,11 +416,10 @@ void JSNX::ReadValue(PGLOBAL g) /*********************************************************************************/ PVAL JSNX::GetColumnValue(PGLOBAL g, PJSON row, int i) { - int n = Nod - 1; PJVAL val = NULL; val = GetRowValue(g, row, i); - SetJsonValue(g, Value, val, n); + SetJsonValue(g, Value, val); return Value; } // end of GetColumnValue @@ -430,7 +433,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) for (; i < Nod && row; i++) { if (Nodes[i].Op == OP_NUM) { - Value->SetValue(row->GetType() == TYPE_JAR ? row->size() : 1); + Value->SetValue(row->GetType() == TYPE_JAR ? 
((PJAR)row)->size() : 1); val = new(g) JVALUE(g, Value); return val; } else if (Nodes[i].Op == OP_XX) { @@ -452,7 +455,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) } //endif Op } else - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: @@ -460,7 +463,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) - val = arp->GetValue(Nodes[i].Rank); + val = arp->GetArrayValue(Nodes[i].Rank); else if (Nodes[i].Op == OP_EXP) return (PJVAL)ExpandArray(g, arp, i); else @@ -468,7 +471,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) } else { // Unexpected array, unwrap it as [0] - val = arp->GetValue(0); + val = arp->GetArrayValue(0); i--; } // endif's @@ -488,7 +491,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) } // endfor i - // SetJsonValue(g, Value, val, n); + // SetJsonValue(g, Value, val); return val; } // end of GetRowValue @@ -519,17 +522,17 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n) htrc("CalculateArray size=%d op=%d\n", ars, op); for (i = 0; i < ars; i++) { - jvrp = arp->GetValue(i); + jvrp = arp->GetArrayValue(i); if (trace(1)) htrc("i=%d nv=%d\n", i, nv); if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) { if (jvrp->IsNull()) { - jvrp->Value = AllocateValue(g, GetJsonNull(), TYPE_STRING); + jvrp->SetString(g, GetJsonNull(), 0); jvp = jvrp; } else if (n < Nod - 1 && jvrp->GetJson()) { - jval.SetValue(GetColumnValue(g, jvrp->GetJson(), n + 1)); + jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); jvp = &jval; } else jvp = jvrp; @@ -539,10 +542,10 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n) jvp->GetString(g), jvp->IsNull() ? 
1 : 0); if (!nv++) { - SetJsonValue(g, vp, jvp, n); + SetJsonValue(g, vp, jvp); continue; } else - SetJsonValue(g, MulVal, jvp, n); + SetJsonValue(g, MulVal, jvp); if (!MulVal->IsNull()) { switch (op) { @@ -612,13 +615,13 @@ my_bool JSNX::CheckPath(PGLOBAL g) } else switch (row->GetType()) { case TYPE_JOB: if (Nodes[i].Key) - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: if (!Nodes[i].Key) if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) - val = ((PJAR)row)->GetValue(Nodes[i].Rank); + val = ((PJAR)row)->GetArrayValue(Nodes[i].Rank); break; case TYPE_JVAL: @@ -655,20 +658,20 @@ PJSON JSNX::GetRow(PGLOBAL g) // Expected Array was not there, wrap the value continue; - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: arp = (PJAR)row; if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ) - val = arp->GetValue(Nodes[i].Rank); + val = arp->GetArrayValue(Nodes[i].Rank); else - val = arp->GetValue(Nodes[i].Rx); + val = arp->GetArrayValue(Nodes[i].Rx); } else { // Unexpected array, unwrap it as [0] - val = arp->GetValue(0); + val = arp->GetArrayValue(0); i--; } // endif Nodes @@ -695,9 +698,9 @@ PJSON JSNX::GetRow(PGLOBAL g) nwr = new(g)JOBJECT; if (row->GetType() == TYPE_JOB) { - ((PJOB)row)->SetValue(g, new(g)JVALUE(nwr), Nodes[i-1].Key); + ((PJOB)row)->SetKeyValue(g, new(g)JVALUE(nwr), Nodes[i-1].Key); } else if (row->GetType() == TYPE_JAR) { - ((PJAR)row)->AddValue(g, new(g)JVALUE(nwr)); + ((PJAR)row)->AddArrayValue(g, new(g)JVALUE(nwr)); ((PJAR)row)->InitArray(g); } else { strcpy(g->Message, "Wrong type when writing new row"); @@ -740,16 +743,16 @@ my_bool JSNX::WriteValue(PGLOBAL g, PJVAL jvalp) if (arp) { if (!Nodes[Nod-1].Key) { if (Nodes[Nod-1].Op == OP_EQ) - arp->SetValue(g, jvalp, Nodes[Nod-1].Rank); + arp->SetArrayValue(g, jvalp, Nodes[Nod-1].Rank); else - arp->AddValue(g, jvalp); + arp->AddArrayValue(g, jvalp); arp->InitArray(g); } // endif Key } else if (objp) { if (Nodes[Nod-1].Key) - objp->SetValue(g, jvalp, Nodes[Nod-1].Key); + objp->SetKeyValue(g, jvalp, Nodes[Nod-1].Key); } else if (jvp) jvp->SetValue(jvalp); @@ -781,13 +784,13 @@ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k) switch (jsp->GetType()) { case TYPE_JAR: - err = LocateArray((PJAR)jsp); + err = LocateArray(g, (PJAR)jsp); break; case TYPE_JOB: - err = LocateObject((PJOB)jsp); + err = LocateObject(g, (PJOB)jsp); break; case TYPE_JVAL: - err = LocateValue((PJVAL)jsp); + err = LocateValue(g, (PJVAL)jsp); break; default: err = true; @@ -818,7 +821,7 @@ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k) /*********************************************************************************/ /* Locate in a JSON Array. */ /*********************************************************************************/ -my_bool JSNX::LocateArray(PJAR jarp) +my_bool JSNX::LocateArray(PGLOBAL g, PJAR jarp) { char s[16]; size_t m = Jp->N; @@ -830,7 +833,7 @@ my_bool JSNX::LocateArray(PJAR jarp) if (Jp->WriteStr(s)) return true; - if (LocateValue(jarp->GetValue(i))) + if (LocateValue(g, jarp->GetArrayValue(i))) return true; } // endfor i @@ -841,7 +844,7 @@ my_bool JSNX::LocateArray(PJAR jarp) /*********************************************************************************/ /* Locate in a JSON Object. 
*/ /*********************************************************************************/ -my_bool JSNX::LocateObject(PJOB jobp) +my_bool JSNX::LocateObject(PGLOBAL g, PJOB jobp) { size_t m; @@ -856,7 +859,7 @@ my_bool JSNX::LocateObject(PJOB jobp) if (Jp->WriteStr(pair->Key)) return true; - if (LocateValue(pair->Val)) + if (LocateValue(g, pair->Val)) return true; } // endfor i @@ -867,14 +870,14 @@ my_bool JSNX::LocateObject(PJOB jobp) /*********************************************************************************/ /* Locate a JSON Value. */ /*********************************************************************************/ -my_bool JSNX::LocateValue(PJVAL jvp) +my_bool JSNX::LocateValue(PGLOBAL g, PJVAL jvp) { - if (CompareTree(Jvalp, jvp)) + if (CompareTree(g, Jvalp, jvp)) Found = (--K == 0); else if (jvp->GetArray()) - return LocateArray(jvp->GetArray()); + return LocateArray(g, jvp->GetArray()); else if (jvp->GetObject()) - return LocateObject(jvp->GetObject()); + return LocateObject(g, jvp->GetObject()); return false; } // end of LocateValue @@ -907,13 +910,13 @@ PSZ JSNX::LocateAll(PGLOBAL g, PJSON jsp, PJVAL jvp, int mx) switch (jsp->GetType()) { case TYPE_JAR: - err = LocateArrayAll((PJAR)jsp); + err = LocateArrayAll(g, (PJAR)jsp); break; case TYPE_JOB: - err = LocateObjectAll((PJOB)jsp); + err = LocateObjectAll(g, (PJOB)jsp); break; case TYPE_JVAL: - err = LocateValueAll((PJVAL)jsp); + err = LocateValueAll(g, (PJVAL)jsp); break; default: err = true; @@ -945,7 +948,7 @@ PSZ JSNX::LocateAll(PGLOBAL g, PJSON jsp, PJVAL jvp, int mx) /*********************************************************************************/ /* Locate in a JSON Array. */ /*********************************************************************************/ -my_bool JSNX::LocateArrayAll(PJAR jarp) +my_bool JSNX::LocateArrayAll(PGLOBAL g, PJAR jarp) { if (I < Imax) { Jpnp[++I].Type = TYPE_JAR; @@ -953,7 +956,7 @@ my_bool JSNX::LocateArrayAll(PJAR jarp) for (int i = 0; i < jarp->size(); i++) { Jpnp[I].N = i; - if (LocateValueAll(jarp->GetValue(i))) + if (LocateValueAll(g, jarp->GetArrayValue(i))) return true; } // endfor i @@ -967,7 +970,7 @@ my_bool JSNX::LocateArrayAll(PJAR jarp) /*********************************************************************************/ /* Locate in a JSON Object. */ /*********************************************************************************/ -my_bool JSNX::LocateObjectAll(PJOB jobp) +my_bool JSNX::LocateObjectAll(PGLOBAL g, PJOB jobp) { if (I < Imax) { Jpnp[++I].Type = TYPE_JOB; @@ -975,7 +978,7 @@ my_bool JSNX::LocateObjectAll(PJOB jobp) for (PJPR pair = jobp->First; pair; pair = pair->Next) { Jpnp[I].Key = pair->Key; - if (LocateValueAll(pair->Val)) + if (LocateValueAll(g, pair->Val)) return true; } // endfor i @@ -989,14 +992,14 @@ my_bool JSNX::LocateObjectAll(PJOB jobp) /*********************************************************************************/ /* Locate a JSON Value. 
*/ /*********************************************************************************/ -my_bool JSNX::LocateValueAll(PJVAL jvp) +my_bool JSNX::LocateValueAll(PGLOBAL g, PJVAL jvp) { - if (CompareTree(Jvalp, jvp)) + if (CompareTree(g, Jvalp, jvp)) return AddPath(); else if (jvp->GetArray()) - return LocateArrayAll(jvp->GetArray()); + return LocateArrayAll(g, jvp->GetArray()); else if (jvp->GetObject()) - return LocateObjectAll(jvp->GetObject()); + return LocateObjectAll(g, jvp->GetObject()); return false; } // end of LocateValueAll @@ -1004,7 +1007,7 @@ my_bool JSNX::LocateValueAll(PJVAL jvp) /*********************************************************************************/ /* Compare two JSON trees. */ /*********************************************************************************/ -my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2) +my_bool JSNX::CompareTree(PGLOBAL g, PJSON jp1, PJSON jp2) { if (!jp1 || !jp2 || jp1->GetType() != jp2->GetType() || jp1->size() != jp2->size()) @@ -1013,26 +1016,22 @@ my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2) my_bool found = true; if (jp1->GetType() == TYPE_JVAL) { - PVAL v1 = jp1->GetValue(), v2 = jp2->GetValue(); - - if (v1 && v2) { - if (v1->GetType() == v2->GetType()) - found = !v1->CompareValue(v2); - else - found = false; +// PVL v1 = ((PJVAL)jp1)->GetVal(), v2 = ((PJVAL)jp2)->GetVal(); - } else - found = CompareTree(jp1->GetJsp(), jp2->GetJsp()); + if (((PJVAL)jp1)->DataType == TYPE_JSON && ((PJVAL)jp2)->DataType == TYPE_JSON) + found = CompareTree(g, jp1->GetJsp(), jp2->GetJsp()); + else + found = CompareValues(((PJVAL)jp1), ((PJVAL)jp2)); } else if (jp1->GetType() == TYPE_JAR) { for (int i = 0; found && i < jp1->size(); i++) - found = (CompareTree(jp1->GetValue(i), jp2->GetValue(i))); + found = (CompareTree(g, jp1->GetArrayValue(i), jp2->GetArrayValue(i))); } else if (jp1->GetType() == TYPE_JOB) { PJPR p1 = jp1->GetFirst(), p2 = jp2->GetFirst(); for (; found && p1 && p2; p1 = p1->Next, p2 = p2->Next) - found = CompareTree(p1->Val, p2->Val); + found = CompareTree(g, p1->Val, p2->Val); } else found = false; @@ -1041,10 +1040,68 @@ my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2) } // end of CompareTree /*********************************************************************************/ -/* Add the found path to the list. */ +/* Compare two VAL values and return true if they are equal. 
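
CompareTree above is plain structural equality: the node types and sizes must match, arrays recurse element by element, objects recurse pair by pair, and scalars fall through to the value comparison defined next. A compact sketch of that recursion on a toy tree (illustrative types):

#include <cstdio>
#include <vector>

struct Node {
  bool leaf;
  int  val;                    // payload when leaf
  std::vector<Node> kids;      // children otherwise
};

static bool TreeEq(const Node &a, const Node &b) {
  if (a.leaf != b.leaf) return false;          // types must match
  if (a.leaf) return a.val == b.val;           // scalar comparison
  if (a.kids.size() != b.kids.size()) return false;
  for (size_t i = 0; i < a.kids.size(); i++)   // element-wise recursion
    if (!TreeEq(a.kids[i], b.kids[i])) return false;
  return true;
}

int main() {
  Node a{false, 0, {{true, 1, {}}, {true, 2, {}}}};
  Node b = a;
  printf("%d\n", TreeEq(a, b));                // prints: 1
}
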
*/ /*********************************************************************************/ -my_bool JSNX::AddPath(void) +my_bool JSNX::CompareValues(PJVAL v1, PJVAL v2) { + my_bool b = false; + + switch (v1->DataType) { + case TYPE_STRG: + if (v2->DataType == TYPE_STRG) { + if (v1->Nd || v2->Nd) // Case insensitive + b = (!stricmp(v1->Strp, v2->Strp)); + else + b = (!strcmp(v1->Strp, v2->Strp)); + + } // endif Type + + break; + case TYPE_DTM: + if (v2->DataType == TYPE_DTM) + b = (!strcmp(v1->Strp, v2->Strp)); + + break; + case TYPE_INTG: + if (v2->DataType == TYPE_INTG) + b = (v1->N == v2->N); + else if (v2->DataType == TYPE_BINT) + b = (v1->N == v2->LLn); + + break; + case TYPE_BINT: + if (v2->DataType == TYPE_INTG) + b = (v1->LLn == v2->N); + else if (v2->DataType == TYPE_BINT) + b = (v1->LLn == v2->LLn); + + break; + case TYPE_DBL: + if (v2->DataType == TYPE_DBL) + b = (v1->F == v2->F); + + break; + case TYPE_BOOL: + if (v2->DataType == TYPE_BOOL) + b = (v1->B == v2->B); + + break; + case TYPE_NULL: + if (v2->DataType == TYPE_NULL) + b = true; + + break; + default: + break; + } // endswitch Type + + return b; +} // end of CompareValues + +/*********************************************************************************/ +/* Add the found path to the list. */ +/*********************************************************************************/ +my_bool JSNX::AddPath(void) { char s[16]; if (Jp->WriteStr("\"$")) @@ -1113,7 +1170,7 @@ static void SetChanged(PBSON bsp) /*********************************************************************************/ /* Replaces GetJsonGrpSize not usable when CONNECT is not installed. */ /*********************************************************************************/ -static uint GetJsonGroupSize(void) +uint GetJsonGroupSize(void) { return (JsonGrpSize) ? JsonGrpSize : GetJsonGrpSize(); } // end of GetJsonGroupSize @@ -1121,12 +1178,16 @@ static uint GetJsonGroupSize(void) /*********************************************************************************/ /* Program for SubSet re-initialization of the memory pool. */ /*********************************************************************************/ -static my_bool JsonSubSet(PGLOBAL g) +my_bool JsonSubSet(PGLOBAL g, my_bool b) { PPOOLHEADER pph = (PPOOLHEADER)g->Sarea; - pph->To_Free = (g->Saved_Size) ? g->Saved_Size : (size_t)sizeof(POOLHEADER); + pph->To_Free = (g->Saved_Size) ? g->Saved_Size : sizeof(POOLHEADER); pph->FreeBlk = g->Sarea_Size - pph->To_Free; + + if (b) + g->Saved_Size = 0; + return FALSE; } /* end of JsonSubSet */ @@ -1144,7 +1205,7 @@ inline void JsonMemSave(PGLOBAL g) inline void JsonFreeMem(PGLOBAL g) { g->Activityp = NULL; - PlugExit(g); + g = PlugExit(g); } /* end of JsonFreeMem */ /*********************************************************************************/ @@ -1193,9 +1254,10 @@ static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp) case TYPE_JOB: jvp = new(g) JVALUE((PJSON)vp); break; - case TYPE_VAL: - jvp = new(g) JVALUE(g, (PVAL)vp); - break; +// case TYPE_VAL: +// jvp = new(g) JVALUE(g, (PVAL)vp); +// break; + case TYPE_DTM: case TYPE_STRG: jvp = new(g) JVALUE(g, (PCSZ)vp); break; @@ -1211,24 +1273,22 @@ static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp) } // end try/catch return jvp; -} /* end of JsonNew */ +} /* end of JvalNew */ /*********************************************************************************/ /* Allocate and initialise the memory area. 
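
JsonSubSet and JsonMemSave above implement a mark-and-release arena: PlugSubAlloc bumps To_Free, "save" records the watermark in Saved_Size, and "subset" rolls the pool back to it, reclaiming everything allocated since in one step. A self-contained sketch of the idiom (names and layout are ours, not the POOLHEADER fields):

#include <cstdio>
#include <cstddef>

struct Arena {
  char  *base;
  size_t size, used, saved;

  void *Alloc(size_t n) {                // bump allocation, no per-block free
    if (used + n > size) return NULL;
    void *p = base + used; used += n; return p;
  }
  void Save()   { saved = used; }        // JsonMemSave: remember the watermark
  void SubSet() { used = saved; }        // JsonSubSet: roll the pool back to it
};

int main() {
  static char buf[1024];
  Arena a = {buf, sizeof(buf), 0, 0};
  a.Alloc(100);                          // long-lived allocations
  a.Save();
  a.Alloc(500);                          // per-call scratch
  a.SubSet();                            // all scratch released in one step
  printf("%zu\n", a.used);               // prints: 100
}
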
*/ /*********************************************************************************/ -static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, - char *message, my_bool mbn, - unsigned long reslen, unsigned long memlen, - unsigned long more = 0) +my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, char *message, my_bool mbn, + unsigned long reslen, unsigned long memlen, unsigned long more) { - PGLOBAL g = PlugInit(NULL, memlen + more + 500); // +500 to avoid CheckMem + PGLOBAL g = PlugInit(NULL, (size_t)memlen + more + 500); // +500 to avoid CheckMem if (!g) { strcpy(message, "Allocation error"); return true; } else if (g->Sarea_Size == 0) { strcpy(message, g->Message); - PlugExit(g); + g = PlugExit(g); return true; } // endif g @@ -1382,7 +1442,7 @@ static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n) /*********************************************************************************/ /* Returns not 0 if the argument is a JSON item or file name. */ /*********************************************************************************/ -static int IsJson(UDF_ARGS *args, uint i, bool b) +int IsJson(UDF_ARGS *args, uint i, bool b) { int n = 0; @@ -1405,7 +1465,7 @@ static int IsJson(UDF_ARGS *args, uint i, bool b) char *sap; PGLOBAL g = PlugInit(NULL, (size_t)args->lengths[i] * M + 1024); - JsonSubSet(g); +// JsonSubSet(g); sap = MakePSZ(g, args, i); if (ParseJson(g, sap, strlen(sap))) @@ -1449,9 +1509,8 @@ static long GetFileLength(char *fn) /*********************************************************************************/ /* Calculate the reslen and memlen needed by a function. */ /*********************************************************************************/ -static my_bool CalcLen(UDF_ARGS *args, my_bool obj, - unsigned long& reslen, unsigned long& memlen, - my_bool mod = false) +my_bool CalcLen(UDF_ARGS *args, my_bool obj, unsigned long& reslen, + unsigned long& memlen, my_bool mod) { char fn[_MAX_PATH]; unsigned long i, k, m, n; @@ -1568,8 +1627,8 @@ static my_bool CalcLen(UDF_ARGS *args, my_bool obj, /*********************************************************************************/ /* Check if the calculated memory is enough. */ /*********************************************************************************/ -static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, - my_bool m, my_bool obj = false, my_bool mod = false) +my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, + my_bool m, my_bool obj, my_bool mod) { unsigned long rl, ml; my_bool b = false; @@ -1621,7 +1680,7 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, /*********************************************************************************/ /* Make a zero terminated string from the passed argument. */ /*********************************************************************************/ -static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i) +PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i) { if (args->arg_count > (unsigned)i && args->args[i]) { int n = args->lengths[i]; @@ -1690,7 +1749,7 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) /*********************************************************************************/ /* Parse a json file. 
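
JsonInit, CalcLen and CheckMemory above are the shared scaffolding behind every UDF in this file, which all follow the standard MySQL/MariaDB UDF contract: an xxx_init that validates arguments and sizes the work area, the row function itself, and an xxx_deinit that releases it. A skeletal sketch of that contract for a hypothetical no-op UDF (only the UDF API types are real; the names and body are illustrative):

#include <cstring>
// In a server build these types come from <mysql.h> (UDF_INIT, UDF_ARGS, my_bool).

extern "C" {

my_bool noop_init(UDF_INIT *initid, UDF_ARGS *args, char *message) {
  if (args->arg_count != 1) {            // validate arguments once per query
    strcpy(message, "noop() requires one argument");
    return 1;                            // non-zero aborts with 'message'
  }
  initid->ptr = NULL;                    // would point at the work area
  return 0;
}

long long noop(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error) {
  return args->args[0] ? 1LL : 0LL;      // per-row evaluation
}

void noop_deinit(UDF_INIT *initid) {
  // release whatever was hung on initid->ptr
}

} // extern "C"
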
*/ /*********************************************************************************/ -static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len) +static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, size_t& len) { char *memory; HANDLE hFile; @@ -1712,9 +1771,13 @@ static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len) } // endif hFile /*******************************************************************************/ - /* Get the file size (assuming file is smaller than 4 GB) */ + /* Get the file size. */ /*******************************************************************************/ - len = mm.lenL; + len = (size_t)mm.lenL; + + if (mm.lenH) + len += ((size_t)mm.lenH * 0x000000001LL); + memory = (char *)mm.memory; if (!len) { // Empty or deleted file @@ -1742,7 +1805,7 @@ static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len) /*********************************************************************************/ /* Return a json file contains. */ /*********************************************************************************/ -static char *GetJsonFile(PGLOBAL g, char *fn) +char *GetJsonFile(PGLOBAL g, char *fn) { char *str; int h, n, len; @@ -1784,7 +1847,7 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL) { char *sap = (args->arg_count > i) ? args->args[i] : NULL; int n, len; - short c; + int ci; long long bigint; PJSON jsp; PJVAL jvp = new(g) JVALUE; @@ -1827,8 +1890,8 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL) jvp->SetValue(jsp); } else { - c = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; - jvp->SetString(g, sap, c); + ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; + jvp->SetString(g, sap, ci); } // endif n } // endif len @@ -1839,7 +1902,7 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL) if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) - jvp->SetTiny(g, (char)bigint); + jvp->SetBool(g, (char)bigint); else jvp->SetBigint(g, bigint); @@ -1894,6 +1957,8 @@ static PJVAL MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i, return jvp; } // end of MakeTypedValue +/* ------------------------------ The JSON UDF's ------------------------------- */ + /*********************************************************************************/ /* Make a Json value containing the parameter. 
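
ParseJsonFile above drops the old "file smaller than 4 GB" assumption by folding the high 32-bit word of the mapped length into the byte count. A sketch of the usual composition of two 32-bit halves, assuming lenL and lenH are the low and high words of the file size (the textbook form, not the patch's exact expression):

#include <cstdio>
#include <cstdint>

// Compose a 64-bit length from the two halves of a mapped file size.
static unsigned long long FileLen(uint32_t lenL, uint32_t lenH) {
  return (unsigned long long)lenL | ((unsigned long long)lenH << 32);
}

int main() {
  // A 5 GiB file: high word 1, low word 0x40000000.
  printf("%llu\n", FileLen(0x40000000u, 1u));  // prints: 5368709120
}
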
*/ /*********************************************************************************/ @@ -1962,7 +2027,7 @@ char *json_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result, PJAR arp = new(g)JARRAY; for (uint i = 0; i < args->arg_count; i++) - arp->AddValue(g, MakeValue(g, args, i)); + arp->AddArrayValue(g, MakeValue(g, args, i)); arp->InitArray(g); @@ -2032,13 +2097,13 @@ char *json_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result, if (jvp->GetValType() != TYPE_JAR) { arp = new(g)JARRAY; - arp->AddValue(g, jvp); + arp->AddArrayValue(g, jvp); top = arp; } else arp = jvp->GetArray(); for (uint i = 1; i < args->arg_count; i++) - arp->AddValue(g, MakeValue(g, args, i)); + arp->AddArrayValue(g, MakeValue(g, args, i)); arp->InitArray(g); str = MakeResult(g, args, top, args->arg_count); @@ -2130,7 +2195,7 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, if (jvp->GetValType() != TYPE_JAR) { if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) { - arp->AddValue(gb, JvalNew(gb, TYPE_JVAL, jvp)); + arp->AddArrayValue(gb, JvalNew(gb, TYPE_JVAL, jvp)); jvp->SetValue(arp); if (!top) @@ -2142,7 +2207,7 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, arp = jvp->GetArray(); if (arp) { - arp->AddValue(gb, MakeValue(gb, args, 1), x); + arp->AddArrayValue(gb, MakeValue(gb, args, 1), x); arp->InitArray(gb); str = MakeResult(g, args, top, n); } else @@ -2311,7 +2376,7 @@ long long jsonsum_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *err PJAR arp = jvp->GetArray(); for (int i = 0; i < arp->size(); i++) - n += arp->GetValue(i)->GetBigint(); + n += arp->GetArrayValue(i)->GetBigint(); } else { PUSH_WARNING("First argument target is not an array"); @@ -2386,7 +2451,7 @@ double jsonsum_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error PJAR arp = jvp->GetArray(); for (int i = 0; i < arp->size(); i++) - n += arp->GetValue(i)->GetFloat(); + n += arp->GetArrayValue(i)->GetFloat(); } else { PUSH_WARNING("First argument target is not an array"); @@ -2451,7 +2516,7 @@ double jsonavg_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error if (arp->size()) { for (int i = 0; i < arp->size(); i++) - n += arp->GetValue(i)->GetFloat(); + n += arp->GetArrayValue(i)->GetFloat(); n /= arp->size(); } // endif size @@ -2510,7 +2575,7 @@ char *json_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) - objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); + objp->SetKeyValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); str = Serialize(g, objp, NULL, 0); } // endif objp @@ -2560,7 +2625,7 @@ char *json_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) if (!(jvp = MakeValue(g, args, i))->IsNull()) - objp->SetValue(g, jvp, MakeKey(g, args, i)); + objp->SetKeyValue(g, jvp, MakeKey(g, args, i)); str = Serialize(g, objp, NULL, 0); } // endif objp @@ -2612,7 +2677,7 @@ char *json_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i += 2) - objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); + objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); str = Serialize(g, objp, NULL, 0); } // endif objp @@ -2696,7 +2761,7 @@ char *json_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, jobp = jvp->GetObject(); jvp = MakeValue(gb, 
args, 1); key = MakeKey(gb, args, 1); - jobp->SetValue(gb, jvp, key); + jobp->SetKeyValue(gb, jvp, key); str = MakeResult(g, args, top); } else { PUSH_WARNING("First argument target is not an object"); @@ -3049,7 +3114,7 @@ void json_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) PJAR arp = (PJAR)g->Activityp; if (arp && g->N-- > 0) - arp->AddValue(g, MakeValue(g, args, 0)); + arp->AddArrayValue(g, MakeValue(g, args, 0)); } // end of json_array_grp_add @@ -3126,7 +3191,7 @@ void json_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) PJOB objp = (PJOB)g->Activityp; if (g->N-- > 0) - objp->SetValue(g, MakeValue(g, args, 1), MakePSZ(g, args, 0)); + objp->SetKeyValue(g, MakeValue(g, args, 1), MakePSZ(g, args, 0)); } // end of json_object_grp_add @@ -4007,17 +4072,14 @@ my_bool jsoncontains_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return JsonInit(initid, args, message, false, reslen, memlen, more); } // end of jsoncontains_init -long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *result, - unsigned long *res_length, char *is_null, char *error) +long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *, char *error) { - char *p __attribute__((unused)), res[256]; - long long n; + char isn, res[256]; unsigned long reslen; - *is_null = 0; - p = jsonlocate(initid, args, res, &reslen, is_null, error); - n = (*is_null) ? 0LL : 1LL; - return n; + isn = 0; + jsonlocate(initid, args, res, &reslen, &isn, error); + return (isn) ? 0LL : 1LL; } // end of jsoncontains void jsoncontains_deinit(UDF_INIT* initid) @@ -4059,8 +4121,7 @@ my_bool jsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return JsonInit(initid, args, message, true, reslen, memlen, more); } // end of jsoncontains_path_init -long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result, - unsigned long *res_length, char *is_null, char *error) +long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *, char *error) { char *p, *path; long long n; @@ -4071,7 +4132,6 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result, if (g->N) { if (!g->Activityp) { - *is_null = 1; return 0LL; } else return *(long long*)g->Activityp; @@ -4129,7 +4189,6 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result, err: if (g->Mrr) *error = 1; - *is_null = 1; return 0LL; } // end of jsoncontains_path @@ -4406,7 +4465,8 @@ char *json_file(UDF_INIT *initid, UDF_ARGS *args, char *result, fn = MakePSZ(g, args, 0); if (args->arg_count > 1) { - int len, pretty = 3, pty = 3; + int pretty = 3, pty = 3; + size_t len; PJSON jsp; PJVAL jvp = NULL; @@ -4609,7 +4669,7 @@ char *jbin_array(UDF_INIT *initid, UDF_ARGS *args, char *result, strcat(bsp->Msg, " array"); for (uint i = 0; i < args->arg_count; i++) - arp->AddValue(g, MakeValue(g, args, i)); + arp->AddArrayValue(g, MakeValue(g, args, i)); arp->InitArray(g); } // endif arp && bsp @@ -4670,7 +4730,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result, if (jvp->GetValType() != TYPE_JAR) { if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) { - arp->AddValue(gb, jvp); + arp->AddArrayValue(gb, jvp); top = arp; } // endif arp @@ -4678,7 +4738,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result, arp = jvp->GetArray(); for (uint i = 1; i < args->arg_count; i++) - arp->AddValue(gb, MakeValue(gb, args, i)); + arp->AddArrayValue(gb, MakeValue(gb, args, i)); arp->InitArray(gb); @@ -4761,7 +4821,7 @@ char *jbin_array_add(UDF_INIT *initid, UDF_ARGS 
*args, char *result, if (jvp->GetValType() != TYPE_JAR) { if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) { - arp->AddValue(gb, (PJVAL)JvalNew(gb, TYPE_JVAL, jvp)); + arp->AddArrayValue(gb, (PJVAL)JvalNew(gb, TYPE_JVAL, jvp)); jvp->SetValue(arp); if (!top) @@ -4772,7 +4832,7 @@ char *jbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, } else arp = jvp->GetArray(); - arp->AddValue(gb, MakeValue(gb, args, 1), x); + arp->AddArrayValue(gb, MakeValue(gb, args, 1), x); arp->InitArray(gb); } else { PUSH_WARNING("First argument target is not an array"); @@ -4900,7 +4960,7 @@ char *jbin_object(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) - objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); + objp->SetKeyValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) @@ -4957,7 +5017,7 @@ char *jbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) if (!(jvp = MakeValue(g, args, i))->IsNull()) - objp->SetValue(g, jvp, MakeKey(g, args, i)); + objp->SetKeyValue(g, jvp, MakeKey(g, args, i)); if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) strcat(bsp->Msg, " object"); @@ -5016,7 +5076,7 @@ char *jbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i += 2) - objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); + objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) strcat(bsp->Msg, " object"); @@ -5094,7 +5154,7 @@ char *jbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, jobp = jvp->GetObject(); jvp = MakeValue(gb, args, 1); key = MakeKey(gb, args, 1); - jobp->SetValue(gb, jvp, key); + jobp->SetKeyValue(gb, jvp, key); } else { PUSH_WARNING("First argument target is not an object"); // if (g->Mrr) *error = 1; (only if no path) @@ -5313,7 +5373,7 @@ char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, // Get the json tree if ((jvp = jsx->GetRowValue(g, jsp, 0, false))) { - jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_VAL, jvp->GetValue()); + jsp = (jvp->GetJsp()) ? 
jvp->GetJsp() : JvalNew(g, TYPE_JVAL, jvp->GetValue(g));
     if ((bsp = JbinAlloc(g, args, initid->max_length, jsp)))
       strcat(bsp->Msg, " item");
@@ -5639,7 +5699,8 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
                 unsigned long *res_length, char *is_null, char *error)
 {
   char *fn;
-  int pretty = 3, len = 0, pty = 3;
+  int pretty = 3, pty = 3;
+  size_t len = 0;
   PJSON jsp;
   PJVAL jvp = NULL;
   PGLOBAL g = (PGLOBAL)initid->ptr;
@@ -5782,11 +5843,11 @@ my_bool jfile_convert_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
   } // endif args

   CalcLen(args, false, reslen, memlen);
-  return JsonInit(initid, args, message, false, reslen, memlen);
+  return JsonInit(initid, args, message, true, reslen, memlen);
 } // end of jfile_convert_init

 char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result,
-  unsigned long *res_length, char *, char *error) {
+  unsigned long *res_length, char *is_null, char *error) {
   char *str, *fn, *ofn;
   int lrecl = (int)*(longlong*)args->args[2];
   PGLOBAL g = (PGLOBAL)initid->ptr;
@@ -5804,10 +5865,15 @@ char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result,
     str = (char*)g->Xchk;

   if (!str) {
-    str = PlugDup(g, g->Message);
-  } // endif str
+    PUSH_WARNING(g->Message ? g->Message : "Unexpected error");
+    *is_null = 1;
+    *error = 1;
+    *res_length = 0;
+  } else {
+    strcpy(result, str);
+    *res_length = strlen(str);
+  } // endif str

-  *res_length = strlen(str);
   return str;
 } // end of jfile_convert

@@ -5815,9 +5881,136 @@ void jfile_convert_deinit(UDF_INIT* initid) {
   JsonFreeMem((PGLOBAL)initid->ptr);
 } // end of jfile_convert_deinit

+/*********************************************************************************/
+/* Convert a pretty Json file to a binary (BJSON) file. */
+/*********************************************************************************/
+my_bool jfile_bjson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+  unsigned long reslen, memlen;
+
+  if (args->arg_count != 2 && args->arg_count != 3) {
+    strcpy(message, "This function must have 2 or 3 arguments");
+    return true;
+  } else if (args->arg_count == 3 && args->arg_type[2] != INT_RESULT) {
+    strcpy(message, "Third argument must be an integer (LRECL)");
+    return true;
+  } else for (int i = 0; i < 2; i++)
+    if (args->arg_type[i] != STRING_RESULT) {
+      sprintf(message, "Argument %d must be a string (file name)", i + 1);
+      return true;
+    } // endif args

+  CalcLen(args, false, reslen, memlen);
+  memlen = memlen * M;
+  memlen += (args->arg_count == 3) ?
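(ulong)*(longlong*)args->args[2] : 1024;

/*********************************************************************************/
/* Editor's note, not part of the patch: jfile_bjson_init above follows the   */
/* standard MariaDB UDF _init contract -- validate args->arg_count and        */
/* args->arg_type, copy an error text into message and return true to abort,  */
/* or return false to accept the call. A minimal sketch with a hypothetical   */
/* function name:                                                             */
/*********************************************************************************/
#if 0 // illustration only
my_bool sample_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
  if (args->arg_count != 1 || args->arg_type[0] != STRING_RESULT) {
    strcpy(message, "sample() requires one string argument");
    return true;   // true aborts the call; message is reported to the client
  } // endif args

  initid->max_length = 255;  // advertise the maximum result length
  return false;              // false lets the server proceed with the call
} // end of sample_init
#endif

+  memlen += (args->arg_count == 3) ?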
(ulong)*(longlong*)args->args[2] : 1024;
+  return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of jfile_bjson_init
+
+char *jfile_bjson(UDF_INIT *initid, UDF_ARGS *args, char *result,
+  unsigned long *res_length, char*, char *error) {
+  char *fn, *ofn, *buf, *str = NULL;
+  bool loop;
+  ssize_t len, newloc;
+  size_t lrecl, *binszp;
+  PJSON jsp;
+  SWAP *swp;
+  PGLOBAL g = (PGLOBAL)initid->ptr;
+
+  PlugSubSet(g->Sarea, g->Sarea_Size);
+  fn = MakePSZ(g, args, 0);
+  ofn = MakePSZ(g, args, 1);
+
+  if (args->arg_count == 3)
+    lrecl = (size_t)*(longlong*)args->args[2];
+  else
+    lrecl = 1024;
+
+  if (!g->Xchk) {
+    int msgid = MSGID_OPEN_MODE_STRERROR;
+    FILE *fout = NULL; // initialized so the fclose guards below are safe
+    FILE *fin = NULL;
+
+    if (!(fin = global_fopen(g, msgid, fn, "rt")))
+      str = strcpy(result, g->Message);
+    else if (!(fout = global_fopen(g, msgid, ofn, "wb")))
+      str = strcpy(result, g->Message);
+    else if ((buf = (char*)PlgDBSubAlloc(g, NULL, lrecl)) &&
+             (binszp = (size_t*)PlgDBSubAlloc(g, NULL, sizeof(size_t)))) {
+      JsonMemSave(g);
+
+      try {
+        do {
+          loop = false;
+          JsonSubSet(g);
+
+          if (!fgets(buf, lrecl, fin)) {
+            if (!feof(fin)) {
+              sprintf(g->Message, "Error %d reading %zd bytes from %s", errno, lrecl, fn);
+              str = strcpy(result, g->Message);
+            } else
+              str = strcpy(result, ofn);
+
+          } else if ((len = strlen(buf))) {
+            if ((jsp = ParseJson(g, buf, len))) {
+              newloc = (size_t)PlugSubAlloc(g, NULL, 0);
+              *binszp = newloc - (size_t)jsp;
+
+              swp = new(g) SWAP(g, jsp);
+              swp->SwapJson(jsp, true);
+
+              if (fwrite(binszp, sizeof(*binszp), 1, fout) != 1) {
+                sprintf(g->Message, "Error %d writing %zd bytes to %s",
+                        errno, sizeof(*binszp), ofn);
+                str = strcpy(result, g->Message);
+              } else if (fwrite(jsp, *binszp, 1, fout) != 1) {
+                sprintf(g->Message, "Error %d writing %zd bytes to %s",
+                        errno, *binszp, ofn);
+                str = strcpy(result, g->Message);
+              } else
+                loop = true;
+
+            } else {
+              str = strcpy(result, g->Message);
+            } // endif jsp
+
+          } else
+            loop = true;
+
+        } while (loop);
+
+      } catch (int) {
+        str = strcpy(result, g->Message);
+      } catch (const char* msg) {
+        str = strcpy(result, msg);
+      } // end catch
+
+    } else
+      str = strcpy(result, g->Message);
+
+    if (fin) fclose(fin);
+    if (fout) fclose(fout);
+    g->Xchk = str;
+  } else
+    str = (char*)g->Xchk;
+
+  if (!str) {
+    if (g->Message)
+      str = strcpy(result, g->Message);
+    else
+      str = strcpy(result, "Unexpected error");
+
+  } // endif str
+
+  *res_length = strlen(str);
+  return str;
+} // end of jfile_bjson
+
+void jfile_bjson_deinit(UDF_INIT* initid) {
+  JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of jfile_bjson_deinit
+
 /* --------------------------------- Class JUP --------------------------------- */

-#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0)
+#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0)

 /*********************************************************************************/
 /* JUP public constructor.
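*/

/*********************************************************************************/
/* Editor's note, not part of the patch: jfile_bjson above frames each output */
/* record as a size_t length prefix followed by the swapped binary tree. A    */
/* hypothetical reader for that framing, under the same assumptions:          */
/*********************************************************************************/
#if 0 // illustration only
static bool ReadBjsonRecord(FILE *fin, char *buf, size_t max)
{
  size_t binsz;

  if (fread(&binsz, sizeof(binsz), 1, fin) != 1)
    return false;                    // EOF or read error

  // refuse oversized records, then read exactly binsz bytes of tree data
  return binsz <= max && fread(buf, binsz, 1, fin) == 1;
} // end of ReadBjsonRecord
#endif

/*********************************************************************************/
/* JUP public constructor.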
*/
@@ -5825,7 +6018,9 @@ void jfile_convert_deinit(UDF_INIT* initid) {
   JsonFreeMem((PGLOBAL)initid->ptr);
 JUP::JUP(PGLOBAL g) {
   fs = NULL;
   s = buff = NULL;
-  i = k = len = recl = 0;
+  len = 0;
+  k = recl = 0;
+  i = 0;
 } // end of JUP constructor

 /*********************************************************************************/
@@ -5853,11 +6048,16 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
   /*******************************************************************************/
   /* Get the file size (assuming file is smaller than 4 GB) */
   /*******************************************************************************/
-  if (!mm.lenL) { // Empty or deleted file
+  if (!mm.lenL && !mm.lenH) { // Empty or deleted file
     CloseFileHandle(hFile);
     return NULL;
-  } else
-    len = (int)mm.lenL;
+  } else {
+    len = (size_t)mm.lenL;
+
+    if (mm.lenH)
+      len += ((size_t)mm.lenH * 0x100000000LL);
+
+  } // endif size

   if (!mm.memory) {
     CloseFileHandle(hFile);
@@ -5875,7 +6075,7 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
     sprintf(g->Message, MSG(OPEN_MODE_ERROR), "w", (int)errno, outfn);
     strcat(strcat(g->Message, ": "), strerror(errno));
-    CloseMemMap(mm.memory, (size_t)mm.lenL);
+    CloseMemMap(mm.memory, len);
     return NULL;
   } // endif fs
@@ -5884,7 +6084,7 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
   if (!unPretty(g, lrecl))
     ret = outfn;

-  CloseMemMap(mm.memory, (size_t)mm.lenL);
+  CloseMemMap(mm.memory, len);
   fclose(fs);
   return ret;
 } // end of UnprettyJsonFile
@@ -6329,8 +6529,7 @@ my_bool countin_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
   return false;
 } // end of countin_init

-long long countin(UDF_INIT *initid, UDF_ARGS *args, char *result,
-                  unsigned long *res_length, char *is_null, char *)
+long long countin(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *)
 {
   PSZ str1, str2;
   char *s;
diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h
index 897b0fe9919..689a02ebbc5 100644
--- a/storage/connect/jsonudf.h
+++ b/storage/connect/jsonudf.h
@@ -1,10 +1,11 @@
 /******************** tabjson H Declares Source Code File (.H) *******************/
-/* Name: jsonudf.h Version 1.3 */
+/* Name: jsonudf.h Version 1.4 */
 /* */
-/* (C) Copyright to the author Olivier BERTRAND 2015-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2015-2020 */
 /* */
 /* This file contains the JSON UDF function and class declares. */
 /*********************************************************************************/
+#pragma once
 #include "global.h"
 #include "plgdbsem.h"
 #include "block.h"
@@ -15,6 +16,27 @@
 #define UDF_EXEC_ARGS \
   UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char*

+// BSON size should be the same on Linux and Windows
+#define BMX 255
+typedef struct BSON* PBSON;
+
+/***********************************************************************/
+/* Structure used to return binary json to Json UDF functions. */
+/***********************************************************************/
+struct BSON {
+  char Msg[BMX + 1];
+  char *Filename;
+  PGLOBAL G;
+  int Pretty;
+  ulong Reslen;
+  my_bool Changed;
+  PJSON Top;
+  PJSON Jsp;
+  PBSON Bsp;
+}; // end of struct BSON
+
+PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp);
+
 /*********************************************************************************/
 /* The JSON tree node. Can be an Object or an Array.
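*/

/*********************************************************************************/
/* Editor's note, not part of the patch: struct BSON above is the fixed-layout */
/* descriptor the jbin_* UDFs hand to one another instead of serialized text. */
/* A hedged sketch of how a jbin_* function fills one, using only what this   */
/* header declares (JbinAlloc's internals are assumed):                       */
/*********************************************************************************/
#if 0 // illustration only
static PBSON MakeDescriptor(PGLOBAL g, UDF_ARGS *args, ulong len, PJSON jsp)
{
  PBSON bsp = JbinAlloc(g, args, len, jsp);

  if (bsp)
    strcat(bsp->Msg, " object");  // Msg carries a short descriptor text

  return bsp;                     // the UDF returns (char*)bsp to its caller
} // end of MakeDescriptor
#endif

/*********************************************************************************/
/* The JSON tree node. Can be an Object or an Array.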
*/ /*********************************************************************************/ @@ -29,9 +51,29 @@ typedef struct _jnode { } JNODE, *PJNODE; typedef class JSNX *PJSNX; -typedef class JOUTPATH *PJTP; -typedef class JOUTALL *PJTA; +/*********************************************************************************/ +/* The JSON utility functions. */ +/*********************************************************************************/ +bool IsNum(PSZ s); +char *NextChr(PSZ s, char sep); +char *GetJsonNull(void); +uint GetJsonGrpSize(void); +my_bool JsonSubSet(PGLOBAL g, my_bool b = false); +my_bool CalcLen(UDF_ARGS* args, my_bool obj, unsigned long& reslen, + unsigned long& memlen, my_bool mod = false); +my_bool JsonInit(UDF_INIT* initid, UDF_ARGS* args, char* message, my_bool mbn, + unsigned long reslen, unsigned long memlen, + unsigned long more = 0); +my_bool CheckMemory(PGLOBAL g, UDF_INIT* initid, UDF_ARGS* args, uint n, + my_bool m, my_bool obj = false, my_bool mod = false); +PSZ MakePSZ(PGLOBAL g, UDF_ARGS* args, int i); +int IsJson(UDF_ARGS* args, uint i, bool b = false); +char *GetJsonFile(PGLOBAL g, char* fn); + +/*********************************************************************************/ +/* The JSON UDF functions. */ +/*********************************************************************************/ extern "C" { DllExport my_bool jsonvalue_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char *jsonvalue(UDF_EXEC_ARGS); @@ -132,7 +174,7 @@ extern "C" { DllExport void jsonget_real_deinit(UDF_INIT*); DllExport my_bool jsoncontains_init(UDF_INIT*, UDF_ARGS*, char*); - DllExport long long jsoncontains(UDF_EXEC_ARGS); + DllExport long long jsoncontains(UDF_INIT*, UDF_ARGS*, char*, char*); DllExport void jsoncontains_deinit(UDF_INIT*); DllExport my_bool jsonlocate_init(UDF_INIT*, UDF_ARGS*, char*); @@ -144,7 +186,7 @@ extern "C" { DllExport void json_locate_all_deinit(UDF_INIT*); DllExport my_bool jsoncontains_path_init(UDF_INIT*, UDF_ARGS*, char*); - DllExport long long jsoncontains_path(UDF_EXEC_ARGS); + DllExport long long jsoncontains_path(UDF_INIT*, UDF_ARGS*, char*, char*); DllExport void jsoncontains_path_deinit(UDF_INIT*); DllExport my_bool json_set_item_init(UDF_INIT*, UDF_ARGS*, char*); @@ -239,6 +281,10 @@ extern "C" { DllExport char* jfile_convert(UDF_EXEC_ARGS); DllExport void jfile_convert_deinit(UDF_INIT*); + DllExport my_bool jfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* jfile_bjson(UDF_EXEC_ARGS); + DllExport void jfile_bjson_deinit(UDF_INIT*); + DllExport my_bool envar_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char *envar(UDF_EXEC_ARGS); @@ -248,17 +294,17 @@ extern "C" { #endif // DEVELOPMENT DllExport my_bool countin_init(UDF_INIT*, UDF_ARGS*, char*); - DllExport long long countin(UDF_EXEC_ARGS); -} // extern "C" + DllExport long long countin(UDF_INIT*, UDF_ARGS*, char*, char*); +} // extern "C" /*********************************************************************************/ /* Structure JPN. Used to make the locate path. 
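*/

/*********************************************************************************/
/* Editor's note, not part of the patch: jsoncontains, jsoncontains_path and  */
/* countin are re-declared above without UDF_EXEC_ARGS because integer UDFs   */
/* use a shorter calling convention than string UDFs -- no result buffer or   */
/* res_length, only the is_null and error flags. A minimal sketch with a      */
/* hypothetical name:                                                         */
/*********************************************************************************/
#if 0 // illustration only
long long sample_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error)
{
  if (!args->args[0]) {
    *is_null = 1;   // tell the server the result is SQL NULL
    return 0LL;
  } // endif args

  return (long long)args->lengths[0];  // e.g. length of the first argument
} // end of sample_int
#endif

/*********************************************************************************/
/* Structure JPN. Used to make the locate path.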
*/ /*********************************************************************************/ typedef struct _jpn { - enum JTYP Type; - PCSZ Key; - int N; + int Type; + PCSZ Key; + int N; } JPN, *PJPN; /*********************************************************************************/ @@ -290,15 +336,16 @@ protected: PVAL ExpandArray(PGLOBAL g, PJAR arp, int n); PVAL CalculateArray(PGLOBAL g, PJAR arp, int n); PVAL MakeJson(PGLOBAL g, PJSON jsp); - void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n); + void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val); PJSON GetRow(PGLOBAL g); - my_bool LocateArray(PJAR jarp); - my_bool LocateObject(PJOB jobp); - my_bool LocateValue(PJVAL jvp); - my_bool LocateArrayAll(PJAR jarp); - my_bool LocateObjectAll(PJOB jobp); - my_bool LocateValueAll(PJVAL jvp); - my_bool CompareTree(PJSON jp1, PJSON jp2); + my_bool CompareValues(PJVAL v1, PJVAL v2); + my_bool LocateArray(PGLOBAL g, PJAR jarp); + my_bool LocateObject(PGLOBAL g, PJOB jobp); + my_bool LocateValue(PGLOBAL g, PJVAL jvp); + my_bool LocateArrayAll(PGLOBAL g, PJAR jarp); + my_bool LocateObjectAll(PGLOBAL g, PJOB jobp); + my_bool LocateValueAll(PGLOBAL g, PJVAL jvp); + my_bool CompareTree(PGLOBAL g, PJSON jp1, PJSON jp2); my_bool AddPath(void); // Default constructor not to be used @@ -355,11 +402,10 @@ public: void CopyNumeric(PGLOBAL g); // Members - FILE* fs; - char* s; - char* buff; - int len; - int recl; - int i, k; + FILE *fs; + char *s; + char *buff; + size_t len; + uint i; + int k, recl; }; // end of class JUP - diff --git a/storage/connect/libdoc.cpp b/storage/connect/libdoc.cpp index 69bbe980eba..61921555ad7 100644 --- a/storage/connect/libdoc.cpp +++ b/storage/connect/libdoc.cpp @@ -378,7 +378,7 @@ bool LIBXMLDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped) if (zipped && InitZip(g, entry)) return true; - int n __attribute__((unused))= xmlKeepBlanksDefault(1); + xmlKeepBlanksDefault(1); return MakeNSlist(g); } // end of Initialize diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index f8b3dc03aa5..e3fa00e119f 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -16,9 +16,9 @@ /*************** Mycat CC Program Source Code File (.CC) ***************/ /* PROGRAM NAME: MYCAT */ /* ------------- */ -/* Version 1.7 */ +/* Version 1.8 */ /* */ -/* Author: Olivier Bertrand 2012 - 2019 */ +/* Author: Olivier Bertrand 2012 - 2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -82,7 +82,11 @@ #endif // JAVA_SUPPORT #include "tabpivot.h" #include "tabvir.h" +#if defined(BSON_SUPPORT) +#include "tabbson.h" +#else #include "tabjson.h" +#endif // BSON_SUPPORT #include "ha_connect.h" #if defined(XML_SUPPORT) #include "tabxml.h" @@ -107,6 +111,9 @@ extern "C" HINSTANCE s_hModule; // Saved module handle #if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) bool MongoEnabled(void); #endif // JAVA_SUPPORT || CMGO_SUPPORT +#if defined(BSON_SUPPORT) +bool Force_Bson(void); +#endif // BSON_SUPPORT /***********************************************************************/ /* Get the plugin directory. */ @@ -130,25 +137,25 @@ TABTYPE GetTypeID(const char *type) : (!stricmp(type, "DBF")) ? TAB_DBF #if defined(XML_SUPPORT) : (!stricmp(type, "XML")) ? TAB_XML -#endif +#endif // XML_SUPPORT : (!stricmp(type, "INI")) ? TAB_INI : (!stricmp(type, "VEC")) ? TAB_VEC #if defined(ODBC_SUPPORT) : (!stricmp(type, "ODBC")) ? TAB_ODBC -#endif +#endif // ODBC_SUPPORT #if defined(JAVA_SUPPORT) : (!stricmp(type, "JDBC")) ? 
TAB_JDBC -#endif +#endif // JAVA_SUPPORT #if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) : (!stricmp(type, "MONGO") && MongoEnabled()) ? TAB_MONGO -#endif +#endif // JAVA_SUPPORT || CMGO_SUPPORT : (!stricmp(type, "MYSQL")) ? TAB_MYSQL : (!stricmp(type, "MYPRX")) ? TAB_MYSQL : (!stricmp(type, "DIR")) ? TAB_DIR #if defined(__WIN__) : (!stricmp(type, "MAC")) ? TAB_MAC : (!stricmp(type, "WMI")) ? TAB_WMI -#endif +#endif // __WIN__ : (!stricmp(type, "TBL")) ? TAB_TBL : (!stricmp(type, "XCOL")) ? TAB_XCL : (!stricmp(type, "OCCUR")) ? TAB_OCCUR @@ -157,9 +164,12 @@ TABTYPE GetTypeID(const char *type) : (!stricmp(type, "PIVOT")) ? TAB_PIVOT : (!stricmp(type, "VIR")) ? TAB_VIR : (!stricmp(type, "JSON")) ? TAB_JSON +#if defined(BSON_SUPPORT) + : (!stricmp(type, "BSON")) ? TAB_BSON +#endif // BSON_SUPPORT #if defined(ZIP_SUPPORT) : (!stricmp(type, "ZIP")) ? TAB_ZIP -#endif +#endif // ZIP_SUPPORT : (!stricmp(type, "OEM")) ? TAB_OEM : TAB_NIY; } // end of GetTypeID @@ -181,6 +191,9 @@ bool IsFileType(TABTYPE type) case TAB_INI: case TAB_VEC: case TAB_JSON: +#if defined(BSON_SUPPORT) + case TAB_BSON: +#endif // BSON_SUPPORT case TAB_REST: // case TAB_ZIP: isfile= true; @@ -276,6 +289,9 @@ bool IsTypeIndexable(TABTYPE type) case TAB_VEC: case TAB_DBF: case TAB_JSON: +#if defined(BSON_SUPPORT) + case TAB_BSON: +#endif // BSON_SUPPORT idx= true; break; default: @@ -302,6 +318,9 @@ int GetIndexType(TABTYPE type) case TAB_VEC: case TAB_DBF: case TAB_JSON: +#if defined(BSON_SUPPORT) + case TAB_BSON: +#endif // BSON_SUPPORT xtyp= 1; break; case TAB_MYSQL: @@ -445,7 +464,7 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am) case TAB_XML: tdp= new(g) XMLDEF; break; #endif // XML_SUPPORT #if defined(VCT_SUPPORT) - case TAB_VEC: tdp = new(g) VCTDEF; break; + case TAB_VEC: tdp= new(g) VCTDEF; break; #endif // VCT_SUPPORT #if defined(ODBC_SUPPORT) case TAB_ODBC: tdp= new(g) ODBCDEF; break; @@ -465,9 +484,20 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am) case TAB_MYSQL: tdp= new(g) MYSQLDEF; break; case TAB_PIVOT: tdp= new(g) PIVOTDEF; break; case TAB_VIR: tdp= new(g) VIRDEF; break; - case TAB_JSON: tdp= new(g) JSONDEF; break; + case TAB_JSON: +#if defined(BSON_SUPPORT) + if (Force_Bson()) + tdp= new(g) BSONDEF; + else +#endif // BSON_SUPPORT + tdp= new(g) JSONDEF; + + break; +#if defined(BSON_SUPPORT) + case TAB_BSON: tdp= new(g) BSONDEF; break; +#endif // BSON_SUPPORT #if defined(ZIP_SUPPORT) - case TAB_ZIP: tdp = new(g) ZIPDEF; break; + case TAB_ZIP: tdp= new(g) ZIPDEF; break; #endif // ZIP_SUPPORT #if defined(REST_SUPPORT) case TAB_REST: tdp= new (g) RESTDEF; break; diff --git a/storage/connect/mysql-test/connect/disabled.def b/storage/connect/mysql-test/connect/disabled.def index a4d629fc3d1..5107de7a930 100644 --- a/storage/connect/mysql-test/connect/disabled.def +++ b/storage/connect/mysql-test/connect/disabled.def @@ -16,9 +16,12 @@ jdbc_postgresql : Variable settings depend on machine configuration json_mongo_c : Need MongoDB running and its C Driver installed json_java_2 : Need MongoDB running and its Java Driver installed json_java_3 : Need MongoDB running and its Java Driver installed +bson_mongo_c : Need MongoDB running and its C Driver installed +bson_java_2 : Need MongoDB running and its Java Driver installed +bson_java_3 : Need MongoDB running and its Java Driver installed mongo_c : Need MongoDB running and its C Driver installed mongo_java_2 : Need MongoDB running and its Java Driver installed mongo_java_3 : Need MongoDB running and its Java Driver installed 
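# Editor's note (not part of the patch): disabled.def entries use the
# "<test name> : <reason>" format; a line starting with '#' is only a
# comment, so the #bson line below is a development placeholder rather
# than an active disable.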
tbl_thread : Bug MDEV-9844,10179,14214 03/01/2018 OB Option THREAD removed -grant2 : Until fixed +#bson : Development #vcol : Different error code on different versions diff --git a/storage/connect/mysql-test/connect/r/alter_xml.result b/storage/connect/mysql-test/connect/r/alter_xml.result index 7cdb1e5d21c..d2f882f1287 100644 --- a/storage/connect/mysql-test/connect/r/alter_xml.result +++ b/storage/connect/mysql-test/connect/r/alter_xml.result @@ -54,7 +54,7 @@ line </t1> # NOTE: The first (ignored) row is due to the remaining HEADER=1 option. # Testing field option modification -ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0; +ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0; SELECT * FROM t1; c d 1 One @@ -64,7 +64,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c` int(11) NOT NULL, - `d` char(10) NOT NULL `FIELD_FORMAT`='@' + `d` char(10) NOT NULL `XPATH`='@' ) ENGINE=CONNECT DEFAULT CHARSET=latin1 `QUOTED`=1 `TABLE_TYPE`=XML `TABNAME`=t1 `OPTION_LIST`='xmlsup=domdoc,rownode=row' `HEADER`=0 SELECT * FROM t2; line diff --git a/storage/connect/mysql-test/connect/r/alter_xml2.result b/storage/connect/mysql-test/connect/r/alter_xml2.result index 8eb56e3dcc3..a15be966aa8 100644 --- a/storage/connect/mysql-test/connect/r/alter_xml2.result +++ b/storage/connect/mysql-test/connect/r/alter_xml2.result @@ -56,7 +56,7 @@ line </t1> # NOTE: The first (ignored) row is due to the remaining HEADER=1 option. # Testing field option modification -ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0; +ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0; SELECT * FROM t1; c d 1 One @@ -66,7 +66,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c` int(11) NOT NULL, - `d` char(10) NOT NULL `FIELD_FORMAT`='@' + `d` char(10) NOT NULL `XPATH`='@' ) ENGINE=CONNECT DEFAULT CHARSET=latin1 `QUOTED`=1 `TABLE_TYPE`=XML `TABNAME`=t1 `OPTION_LIST`='xmlsup=libxml2,rownode=row' `HEADER`=0 SELECT * FROM t2; line diff --git a/storage/connect/mysql-test/connect/r/bson.result b/storage/connect/mysql-test/connect/r/bson.result new file mode 100644 index 00000000000..fd15e020aac --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson.result @@ -0,0 +1,517 @@ +# +# Testing doc samples +# +CREATE TABLE t1 +( +ISBN CHAR(15), +LANG CHAR(2), +SUBJECT CHAR(32), +AUTHOR CHAR(64), +TITLE CHAR(32), +TRANSLATION CHAR(32), +TRANSLATOR CHAR(80), +PUBLISHER CHAR(32), +DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB +9782212090819 fr applications Jean-Christophe Bernadac, François Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +# +# Testing Jpath. 
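Get the number of authors

+# Editor's note (not test output): the JPATH array steps exercised in this
+# result file carry operators: [#] returns the array size (Authors is 2 for
+# the first book below), [*] expands one row per element, ["sep"] concatenates
+# the values with the given separator, and, further down, [+] sums numeric
+# values while [!] averages them (see the SUMS/AVGS columns).

+# Testing Jpath.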
Get the number of authors +# +CREATE TABLE t1 +( +ISBN CHAR(15), +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +Authors INT(2) JPATH='$.AUTHOR[#]', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN Language Subject Authors Title Translation Translator Publisher Location Year +9782212090819 fr applications 2 Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications 1 XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +# +# Concatenates the authors +# +CREATE TABLE t1 +( +ISBN CHAR(15), +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe and François Bernadac and Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +# +# Testing expanding authors +# +CREATE TABLE t1 +( +ISBN CHAR(15), +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications François Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999 +UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab'; +SELECT * FROM t1 WHERE ISBN = '9782212090819'; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +# +# To add an author a new table must be created +# +CREATE TABLE t2 ( +FIRSTNAME CHAR(32), +LASTNAME CHAR(32)) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR'; +SELECT * FROM t2; +FIRSTNAME LASTNAME +William J. 
Pardi +INSERT INTO t2 VALUES('Charles','Dickens'); +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999 +9782840825685 fr applications Charles Dickens XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +DROP TABLE t2; +# +# Check the biblio file has the good format +# +CREATE TABLE t1 +( +line char(255) +) +ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json'; +SELECT * FROM t1; +line +[ + { + "ISBN": "9782212090819", + "LANG": "fr", + "SUBJECT": "applications", + "AUTHOR": [ + { + "FIRSTNAME": "Jean-Christophe", + "LASTNAME": "Bernadac" + }, + { + "FIRSTNAME": "Philippe", + "LASTNAME": "Knab" + } + ], + "TITLE": "Construire une application XML", + "PUBLISHER": { + "NAME": "Eyrolles", + "PLACE": "Paris" + }, + "DATEPUB": 1999 + }, + { + "ISBN": "9782840825685", + "LANG": "fr", + "SUBJECT": "applications", + "AUTHOR": [ + { + "FIRSTNAME": "William J.", + "LASTNAME": "Pardi" + }, + { + "FIRSTNAME": "Charles", + "LASTNAME": "Dickens" + } + ], + "TITLE": "XML en Action", + "TRANSLATION": "adapté de l'anglais par", + "TRANSLATOR": { + "FIRSTNAME": "James", + "LASTNAME": "Guerin" + }, + "PUBLISHER": { + "NAME": "Microsoft Press", + "PLACE": "Paris" + }, + "DATEPUB": 1999 + } +] +DROP TABLE t1; +# +# Testing a pretty=0 file +# +CREATE TABLE t1 +( +ISBN CHAR(15) NOT NULL, +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', +TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', +TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB', +INDEX IX(ISBN) +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 1 IX 1 ISBN A NULL NULL NULL XINDEX +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation TranslatorFN TranslatorLN Publisher Location Year +9782212090819 fr applications Jean-Michel Bernadac Construire une application XML NULL NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications François Knab Construire une application XML NULL NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. 
Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 2001 +DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref IX IX 15 const 1 Using where +UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819'; +ERROR HY000: Got error 122 'Cannot write expanded column when Pretty is not 2' from CONNECT +DROP TABLE t1; +# +# A file with 2 arrays +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer+Food+Food+Car 69.00 +Joe 4 Beer+Beer+Food+Food+Beer 83.00 +Joe 5 Beer+Food 26.00 +Beth 3 Beer 16.00 +Beth 4 Food+Beer 32.00 +Beth 5 Food+Beer 32.00 +Janet 3 Car+Food+Beer 55.00 +Janet 4 Car 17.00 +Janet 5 Beer+Car+Beer+Food 57.00 +DROP TABLE t1; +# +# Now it can be fully expanded +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 3 Beer 16.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +Janet 4 Car 17.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +DROP TABLE t1; +# +# A table showing many calculated results +# +CREATE TABLE t1 ( +WHO CHAR(12) NOT NULL, +WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE +Joe 3, 4, 5 69.00+83.00+26.00 178.00 17.25+16.60+13.00 46.85 59.33 15.62 16.18 +Beth 3, 4, 5 16.00+32.00+32.00 80.00 16.00+16.00+16.00 48.00 26.67 16.00 16.00 +Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.12 +DROP TABLE t1; +# +# Expand expense in 3 one week tables +# +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t2; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t3; +WHO WEEK WHAT AMOUNT +Joe 4 Beer 19.00 +Joe 4 Beer 
16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t4; +WHO WEEK WHAT AMOUNT +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +# +# The expanded table is made as a TBL table +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32), +AMOUNT DOUBLE(8,2)) +ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4'; +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +DROP TABLE t1, t2, t3, t4; +# +# Three partial JSON tables +# +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json'; +SELECT * FROM t2; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json'; +SELECT * FROM t3; +WHO WEEK WHAT AMOUNT +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json'; +SELECT * FROM t4; +WHO WEEK WHAT AMOUNT +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +# +# The complete table can be a multiple JSON table +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1; +SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; +WHO WEEK WHAT AMOUNT +Beth 3 Beer 16.00 +Beth 4 Beer 15.00 +Beth 4 Food 17.00 +Beth 5 Beer 20.00 +Beth 5 Food 12.00 +Janet 3 Beer 18.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 4 Car 17.00 +Janet 5 Beer 14.00 +Janet 5 Beer 19.00 +Janet 5 Car 12.00 +Janet 5 Food 12.00 +Joe 3 Beer 18.00 +Joe 3 Car 20.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 4 Beer 14.00 +Joe 4 Beer 16.00 +Joe 4 Beer 19.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +DROP TABLE t1; +# +# Or also a partition JSON table +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json'; +ALTER TABLE t1 +PARTITION BY LIST COLUMNS(WEEK) ( 
+PARTITION `3` VALUES IN(3), +PARTITION `4` VALUES IN(4), +PARTITION `5` VALUES IN(5)); +Warnings: +Warning 1105 Data repartition in 3 is unchecked +Warning 1105 Data repartition in 4 is unchecked +Warning 1105 Data repartition in 5 is unchecked +SHOW WARNINGS; +Level Code Message +Warning 1105 Data repartition in 3 is unchecked +Warning 1105 Data repartition in 4 is unchecked +Warning 1105 Data repartition in 5 is unchecked +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +SELECT * FROM t1 WHERE WEEK = 4; +WHO WEEK WHAT AMOUNT +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +DROP TABLE t1, t2, t3, t4; diff --git a/storage/connect/mysql-test/connect/r/bson_java_2.result b/storage/connect/mysql-test/connect/r/bson_java_2.result new file mode 100644 index 00000000000..1c21fc7c54f --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson_java_2.result @@ -0,0 +1,385 @@ +set connect_enable_mongo=1; +set connect_json_all_path=0; +# +# Test the MONGO table type +# +CREATE TABLE t1 (Document varchar(1024) JPATH='*') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096 +OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8; +SELECT * from t1 limit 3; +Document +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.856077,40.848447],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":"2014-03-03T00:00:00.000Z"},"grade":"A","score":2},{"date":{"$date":"2013-09-11T00:00:00.000Z"},"grade":"A","score":6},{"date":{"$date":"2013-01-24T00:00:00.000Z"},"grade":"A","score":10},{"date":{"$date":"2011-11-23T00:00:00.000Z"},"grade":"A","score":9},{"date":{"$date":"2011-03-10T00:00:00.000Z"},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.961704,40.662942],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":"2014-12-30T00:00:00.000Z"},"grade":"A","score":8},{"date":{"$date":"2014-07-01T00:00:00.000Z"},"grade":"B","score":23},{"date":{"$date":"2013-04-30T00:00:00.000Z"},"grade":"A","score":12},{"date":{"$date":"2012-05-08T00:00:00.000Z"},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.98513559999999,40.7676919],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":"2014-09-06T00:00:00.000Z"},"grade":"A","score":2},{"date":{"$date":"2013-07-22T00:00:00.000Z"},"grade":"A","score":11},{"date":{"$date":"2012-07-31T00:00:00.000Z"},"grade":"A","score":12},{"date":{"$date":"2011-12-29T00:00:00.000Z"},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"} +DROP TABLE t1; +# +# Test catfunc +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants 
CATFUNC=columns +OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * from t1; +Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath +_id 1 CHAR 24 24 0 0 _id +address_building 1 CHAR 10 10 0 0 address.building +address_coord 1 CHAR 1024 1024 0 1 address.coord +address_street 1 CHAR 38 38 0 0 address.street +address_zipcode 1 CHAR 5 5 0 0 address.zipcode +borough 1 CHAR 13 13 0 0 +cuisine 1 CHAR 64 64 0 0 +grades_date 1 CHAR 1024 1024 0 1 grades.0.date +grades_grade 1 CHAR 14 14 0 1 grades.0.grade +grades_score 7 INTEGER 2 2 0 1 grades.0.score +name 1 CHAR 98 98 0 0 +restaurant_id 1 CHAR 8 8 0 0 +DROP TABLE t1; +# +# Explicit columns +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(255) NOT NULL, +cuisine VARCHAR(255) NOT NULL, +borough VARCHAR(255) NOT NULL, +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=2'; +SELECT * FROM t1 LIMIT 10; +_id name cuisine borough restaurant_id +58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445 +58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340 +58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841 +58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018 +58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068 +58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151 +58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442 +58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483 +58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649 +58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731 +DROP TABLE t1; +# +# Test discovery +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +OPTION_LIST='Depth=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `cuisine` char(64) NOT NULL, + `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096 +SELECT * FROM t1 LIMIT 5; +_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 
57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068 +DROP TABLE t1; +# +# Dropping a column +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=2,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 10; +_id address borough cuisine name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +DROP TABLE t1; +# +# Specifying Jpath +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(64) NOT NULL, +cuisine CHAR(200) NOT NULL, +borough CHAR(16) NOT NULL, +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 1; +_id 58ada47de5a51ddfcd5ed51c +name Morris Park Bake Shop +cuisine Bakery +borough Bronx +street Morris Park Ave +building 1007 +zipcode 10462 +grade A +score 2 +date 1970-01-01 +restaurant_id 30075445 +SELECT name, street, score, date FROM t1 LIMIT 5; +name street score date +Morris Park Bake Shop Morris Park Ave 2 1970-01-01 +Wendy'S Flatbush Avenue 8 1970-01-01 +Dj Reynolds Pub And Restaurant West 57 Street 2 1970-01-01 +Riviera Caterer Stillwell Avenue 5 1970-01-01 +Tov Kosher Kitchen 63 Road 20 1970-01-01 +SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10; +name cuisine borough +Morris Park Bake Shop Bakery Bronx +Wendy'S Hamburgers Brooklyn +Dj Reynolds Pub And Restaurant Irish Manhattan +Riviera Caterer American Brooklyn +Kosher Island Jewish/Kosher Staten Island +Wilken'S Fine Food Delicatessen Brooklyn +Regina Caterers American Brooklyn +Taste The 
Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn +Wild Asia American Bronx +C & C Catering Service American Brooklyn +SELECT COUNT(*) FROM t1 WHERE grade = 'A'; +COUNT(*) +20687 +SELECT * FROM t1 WHERE cuisine = 'English'; +_id name cuisine borough street building zipcode grade score date restaurant_id +58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 1970-01-01 40391531 +58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 1970-01-01 40392496 +58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 1970-01-01 40816202 +58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 1970-01-01 41022701 +58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 1970-01-01 41076583 +58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 1970-01-01 41443706 +58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 1970-01-01 41448559 +58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 1970-01-01 41513545 +58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 1970-01-01 41557377 +58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 1970-01-01 41625263 +58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 1970-01-01 41633327 +58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 1970-01-01 41660253 +58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 1970-01-01 41664704 +58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 1970-01-01 41690534 +58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 1970-01-01 50000290 +58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 1970-01-01 50011097 +SELECT * FROM t1 WHERE score = building; +_id name cuisine borough street building zipcode grade score date restaurant_id +DROP TABLE t1; +# +# Specifying Filter +# +CREATE TABLE t1 ( +_id CHAR(24) NOT NULL, +name CHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +restaurant_id CHAR(8) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT name FROM t1 WHERE borough = 'Queens'; +name +La Baraka Restaurant +Air France Lounge +Tournesol +Winegasm +Cafe Henri +Bistro 33 +Domaine Wine Bar +Cafe Triskell +Cannelle Patisserie +La Vie +Dirty Pierres Bistro +Fresca La Crepe +Bliss 46 Bistro +Bear +Cuisine By Claudette +Paris Baguette +The Baroness Bar +Francis Cafe +Madame Sou Sou +Crepe 'N' Tearia +Aperitif Bayside Llc +DROP TABLE t1; +# +# Testing pipeline +# +CREATE TABLE t1 ( +name VARCHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +date DATETIME NOT NULL, +grade CHAR(1) NOT NULL, +score INT(4) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}' +OPTION_LIST='Driver=Java,Version=2,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 10; +name borough date 
grade score +Tout Va Bien Manhattan 1970-01-01 01:33:34 B 15 +Tout Va Bien Manhattan 1970-01-01 01:33:34 A 13 +Tout Va Bien Manhattan 1970-01-01 01:33:33 C 36 +Tout Va Bien Manhattan 1970-01-01 01:33:33 B 22 +Tout Va Bien Manhattan 1970-01-01 01:33:32 C 36 +Tout Va Bien Manhattan 1970-01-01 01:33:32 C 7 +La Grenouille Manhattan 1970-01-01 01:33:34 A 10 +La Grenouille Manhattan 1970-01-01 01:33:33 A 9 +La Grenouille Manhattan 1970-01-01 01:33:32 A 13 +Le Perigord Manhattan 1970-01-01 01:33:34 B 14 +SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx'; +name grade score date +Bistro Sk A 10 1970-01-01 01:33:34 +Bistro Sk A 12 1970-01-01 01:33:34 +Bistro Sk B 18 1970-01-01 01:33:33 +DROP TABLE t1; +# +# try level 2 discovery +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +COLIST='{"cuisine":0}' CONNECTION='mongodb://localhost:27017' LRECL=4096 +OPTION_LIST='Driver=Java,level=2,version=2'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `grades_date` char(24) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=2' `LRECL`=4096 +SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; +name borough address_street score +Le Gamin Brooklyn Vanderbilt Avenue 24 +Bistro 33 Queens Ditmars Boulevard 15 +Dirty Pierres Bistro Queens Station Square 22 +Santos Anne Brooklyn Union Avenue 26 +Le Paddock Brooklyn Prospect Avenue 17 +La Crepe Et La Vie Brooklyn Foster Avenue 24 +Francis Cafe Queens Ditmars Boulevard 19 +DROP TABLE t1; +# +# try CRUD operations +# +false +CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64)) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; +DELETE FROM t1; +INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three'); +SELECT * FROM t1; +_id msg +0 NULL +1 One +2 Two +3 Three +UPDATE t1 SET msg = 'Deux' WHERE _id = 2; +DELETE FROM t1 WHERE msg IS NULL; +SELECT * FROM t1; +_id msg +1 One +2 Deux +3 Three +DELETE FROM t1; +DROP TABLE t1; +true +# +# List states whose population is equal or more than 10 millions +# +false +CREATE TABLE t1 ( +_id char(5) NOT NULL, +city char(16) NOT NULL, +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', +pop int(11) NOT NULL, +state char(2) NOT NULL) +ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities' +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET='utf8'; +# Using SQL for grouping +SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC; +state totalPop +CA 29754890 +NY 
17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +# Using a pipeline for grouping +CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}' +OPTION_LIST='Driver=Java,Version=2,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1; +_id totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +true +# +# Test making array +# +CREATE TABLE t1 ( +_id int(4) NOT NULL, +item CHAR(8) NOT NULL, +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; +INSERT INTO t1 VALUES +(1,'journal',87,45,63,12,78), +(2,'notebook',123,456,789,NULL,NULL), +(3,'paper',5,7,3,8,NULL), +(4,'planner',25,71,NULL,44,27), +(5,'postcard',5,7,3,8,NULL); +SELECT * FROM t1; +_id item prices_0 prices_1 prices_2 prices_3 prices_4 +1 journal 87 45 63 12 78 +2 notebook 123 456 789 NULL NULL +3 paper 5 7 3 8 NULL +4 planner 25 71 NULL 44 27 +5 postcard 5 7 3 8 NULL +DROP TABLE t1; +# +# Test array aggregation +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}' +OPTION_LIST='Driver=Java,Version=2,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1; +item total average +journal 285 57.00 +notebook 1368 456.00 +paper 23 5.75 +planner 167 41.75 +postcard 23 5.75 +DROP TABLE t1; +true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/bson_java_3.result b/storage/connect/mysql-test/connect/r/bson_java_3.result new file mode 100644 index 00000000000..d198ee3faa4 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson_java_3.result @@ -0,0 +1,385 @@ +set connect_enable_mongo=1; +set connect_json_all_path=0; +# +# Test the MONGO table type +# +CREATE TABLE t1 (Document varchar(1024) JPATH='*') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096 +OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8; +SELECT * from t1 limit 3; +Document +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.856077,40.848447],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":1393804800000},"grade":"A","score":2},{"date":{"$date":1378857600000},"grade":"A","score":6},{"date":{"$date":1358985600000},"grade":"A","score":10},{"date":{"$date":1322006400000},"grade":"A","score":9},{"date":{"$date":1299715200000},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.961704,40.662942],"street":"Flatbush 
Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":1419897600000},"grade":"A","score":8},{"date":{"$date":1404172800000},"grade":"B","score":23},{"date":{"$date":1367280000000},"grade":"A","score":12},{"date":{"$date":1336435200000},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.98513559999999,40.7676919],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":1409961600000},"grade":"A","score":2},{"date":{"$date":1374451200000},"grade":"A","score":11},{"date":{"$date":1343692800000},"grade":"A","score":12},{"date":{"$date":1325116800000},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"} +DROP TABLE t1; +# +# Test catfunc +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns +OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * from t1; +Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath +_id 1 CHAR 24 24 0 0 _id +address_building 1 CHAR 10 10 0 0 address.building +address_coord 1 CHAR 1024 1024 0 1 address.coord +address_street 1 CHAR 38 38 0 0 address.street +address_zipcode 1 CHAR 5 5 0 0 address.zipcode +borough 1 CHAR 13 13 0 0 +cuisine 1 CHAR 64 64 0 0 +grades_date 1 CHAR 1024 1024 0 1 grades.0.date +grades_grade 1 CHAR 14 14 0 1 grades.0.grade +grades_score 7 INTEGER 2 2 0 1 grades.0.score +name 1 CHAR 98 98 0 0 +restaurant_id 1 CHAR 8 8 0 0 +DROP TABLE t1; +# +# Explicit columns +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(255) NOT NULL, +cuisine VARCHAR(255) NOT NULL, +borough VARCHAR(255) NOT NULL, +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=3'; +SELECT * FROM t1 LIMIT 10; +_id name cuisine borough restaurant_id +58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445 +58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340 +58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841 +58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018 +58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068 +58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151 +58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442 +58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483 +58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649 +58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731 +DROP TABLE t1; +# +# Test discovery +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +OPTION_LIST='Depth=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `cuisine` char(64) NOT NULL, + `grades_date` varchar(1024) 
DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096 +SELECT * FROM t1 LIMIT 5; +_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 +DROP TABLE t1; +# +# Dropping a column +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=3,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 10; +_id address borough cuisine name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +DROP TABLE t1; +# +# Specifying Jpath +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(64) NOT NULL, +cuisine CHAR(200) NOT NULL, +borough CHAR(16) NOT NULL, +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 1; 
+_id 58ada47de5a51ddfcd5ed51c +name Morris Park Bake Shop +cuisine Bakery +borough Bronx +street Morris Park Ave +building 1007 +zipcode 10462 +grade A +score 2 +date 2014-03-03 +restaurant_id 30075445 +SELECT name, street, score, date FROM t1 LIMIT 5; +name street score date +Morris Park Bake Shop Morris Park Ave 2 2014-03-03 +Wendy'S Flatbush Avenue 8 2014-12-30 +Dj Reynolds Pub And Restaurant West 57 Street 2 2014-09-06 +Riviera Caterer Stillwell Avenue 5 2014-06-10 +Tov Kosher Kitchen 63 Road 20 2014-11-24 +SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10; +name cuisine borough +Morris Park Bake Shop Bakery Bronx +Wendy'S Hamburgers Brooklyn +Dj Reynolds Pub And Restaurant Irish Manhattan +Riviera Caterer American Brooklyn +Kosher Island Jewish/Kosher Staten Island +Wilken'S Fine Food Delicatessen Brooklyn +Regina Caterers American Brooklyn +Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn +Wild Asia American Bronx +C & C Catering Service American Brooklyn +SELECT COUNT(*) FROM t1 WHERE grade = 'A'; +COUNT(*) +20687 +SELECT * FROM t1 WHERE cuisine = 'English'; +_id name cuisine borough street building zipcode grade score date restaurant_id +58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 2014-10-23 40391531 +58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 2014-08-14 40392496 +58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 2014-09-29 40816202 +58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 2014-02-11 41022701 +58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 2014-10-08 41076583 +58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 2014-06-09 41443706 +58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 2014-10-22 41448559 +58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 2014-07-26 41513545 +58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 2014-12-03 41557377 +58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 2015-01-16 41625263 +58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 2014-08-27 41633327 +58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 2014-06-03 41660253 +58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 2014-08-07 41664704 +58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 2014-12-27 41690534 +58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 2014-10-28 50000290 +58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 2014-08-18 50011097 +SELECT * FROM t1 WHERE score = building; +_id name cuisine borough street building zipcode grade score date restaurant_id +DROP TABLE t1; +# +# Specifying Filter +# +CREATE TABLE t1 ( +_id CHAR(24) NOT NULL, +name CHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +restaurant_id CHAR(8) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT name FROM t1 WHERE borough = 'Queens'; +name +La Baraka Restaurant +Air France Lounge +Tournesol 
+Winegasm +Cafe Henri +Bistro 33 +Domaine Wine Bar +Cafe Triskell +Cannelle Patisserie +La Vie +Dirty Pierres Bistro +Fresca La Crepe +Bliss 46 Bistro +Bear +Cuisine By Claudette +Paris Baguette +The Baroness Bar +Francis Cafe +Madame Sou Sou +Crepe 'N' Tearia +Aperitif Bayside Llc +DROP TABLE t1; +# +# Testing pipeline +# +CREATE TABLE t1 ( +name VARCHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +date DATETIME NOT NULL, +grade CHAR(1) NOT NULL, +score INT(4) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}' +OPTION_LIST='Driver=Java,Version=3,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 10; +name borough date grade score +Tout Va Bien Manhattan 2014-11-10 01:00:00 B 15 +Tout Va Bien Manhattan 2014-04-03 02:00:00 A 13 +Tout Va Bien Manhattan 2013-07-17 02:00:00 C 36 +Tout Va Bien Manhattan 2013-02-06 01:00:00 B 22 +Tout Va Bien Manhattan 2012-07-16 02:00:00 C 36 +Tout Va Bien Manhattan 2012-03-08 01:00:00 C 7 +La Grenouille Manhattan 2014-04-09 02:00:00 A 10 +La Grenouille Manhattan 2013-03-05 01:00:00 A 9 +La Grenouille Manhattan 2012-02-02 01:00:00 A 13 +Le Perigord Manhattan 2014-07-14 02:00:00 B 14 +SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx'; +name grade score date +Bistro Sk A 10 2014-11-21 01:00:00 +Bistro Sk A 12 2014-02-19 01:00:00 +Bistro Sk B 18 2013-06-12 02:00:00 +DROP TABLE t1; +# +# try level 2 discovery +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +COLIST='{"cuisine":0}' CONNECTION='mongodb://localhost:27017' LRECL=4096 +OPTION_LIST='Driver=Java,level=2,version=3'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=3' `LRECL`=4096 +SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; +name borough address_street score +Le Gamin Brooklyn Vanderbilt Avenue 24 +Bistro 33 Queens Ditmars Boulevard 15 +Dirty Pierres Bistro Queens Station Square 22 +Santos Anne Brooklyn Union Avenue 26 +Le Paddock Brooklyn Prospect Avenue 17 +La Crepe Et La Vie Brooklyn Foster Avenue 24 +Francis Cafe Queens Ditmars Boulevard 19 +DROP TABLE t1; +# +# try CRUD operations +# +false +CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64)) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; +DELETE FROM t1; +INSERT INTO t1 
VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three'); +SELECT * FROM t1; +_id msg +0 NULL +1 One +2 Two +3 Three +UPDATE t1 SET msg = 'Deux' WHERE _id = 2; +DELETE FROM t1 WHERE msg IS NULL; +SELECT * FROM t1; +_id msg +1 One +2 Deux +3 Three +DELETE FROM t1; +DROP TABLE t1; +true +# +# List states whose population is equal or more than 10 millions +# +false +CREATE TABLE t1 ( +_id char(5) NOT NULL, +city char(16) NOT NULL, +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', +pop int(11) NOT NULL, +state char(2) NOT NULL) +ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities' +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET='utf8'; +# Using SQL for grouping +SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC; +state totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +# Using a pipeline for grouping +CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}' +OPTION_LIST='Driver=Java,Version=3,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1; +_id totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +true +# +# Test making array +# +CREATE TABLE t1 ( +_id int(4) NOT NULL, +item CHAR(8) NOT NULL, +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; +INSERT INTO t1 VALUES +(1,'journal',87,45,63,12,78), +(2,'notebook',123,456,789,NULL,NULL), +(3,'paper',5,7,3,8,NULL), +(4,'planner',25,71,NULL,44,27), +(5,'postcard',5,7,3,8,NULL); +SELECT * FROM t1; +_id item prices_0 prices_1 prices_2 prices_3 prices_4 +1 journal 87 45 63 12 78 +2 notebook 123 456 789 NULL NULL +3 paper 5 7 3 8 NULL +4 planner 25 71 NULL 44 27 +5 postcard 5 7 3 8 NULL +DROP TABLE t1; +# +# Test array aggregation +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}' +OPTION_LIST='Driver=Java,Version=3,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1; +item total average +journal 285 57.00 +notebook 1368 456.00 +paper 23 5.75 +planner 167 41.75 +postcard 23 5.75 +DROP TABLE t1; +true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/bson_mongo_c.result b/storage/connect/mysql-test/connect/r/bson_mongo_c.result new file mode 100644 index 00000000000..83bf7cd1974 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson_mongo_c.result @@ -0,0 +1,385 @@ +set connect_enable_mongo=1; +set connect_json_all_path=0; +# +# Test the MONGO table type +# +CREATE TABLE t1 (Document varchar(1024) JPATH='*') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=1024 +OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8; +SELECT * from t1 limit 3; +Document 
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.8560769999999991,40.8484470000000002],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":1393804800000},"grade":"A","score":2},{"date":{"$date":1378857600000},"grade":"A","score":6},{"date":{"$date":1358985600000},"grade":"A","score":10},{"date":{"$date":1322006400000},"grade":"A","score":9},{"date":{"$date":1299715200000},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.9617039999999974,40.6629420000000010],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":1419897600000},"grade":"A","score":8},{"date":{"$date":1404172800000},"grade":"B","score":23},{"date":{"$date":1367280000000},"grade":"A","score":12},{"date":{"$date":1336435200000},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.9851355999999925,40.7676919000000026],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":1409961600000},"grade":"A","score":2},{"date":{"$date":1374451200000},"grade":"A","score":11},{"date":{"$date":1343692800000},"grade":"A","score":12},{"date":{"$date":1325116800000},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"} +DROP TABLE t1; +# +# Test catfunc +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns +OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * from t1; +Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath +_id 1 CHAR 24 24 0 0 _id +address_building 1 CHAR 10 10 0 0 address.building +address_coord 1 CHAR 1024 1024 0 1 address.coord +address_street 1 CHAR 38 38 0 0 address.street +address_zipcode 1 CHAR 5 5 0 0 address.zipcode +borough 1 CHAR 13 13 0 0 +cuisine 1 CHAR 64 64 0 0 +grades_date 1 CHAR 1024 1024 0 1 grades.0.date +grades_grade 1 CHAR 14 14 0 1 grades.0.grade +grades_score 7 INTEGER 2 2 0 1 grades.0.score +name 1 CHAR 98 98 0 0 +restaurant_id 1 CHAR 8 8 0 0 +DROP TABLE t1; +# +# Explicit columns +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(255) NOT NULL, +cuisine VARCHAR(255) NOT NULL, +borough VARCHAR(255) NOT NULL, +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8 +OPTION_LIST='Driver=C,Version=0'; +SELECT * FROM t1 LIMIT 10; +_id name cuisine borough restaurant_id +58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445 +58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340 +58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841 +58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018 +58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068 +58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151 +58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442 +58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483 +58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649 +58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices 
Brooklyn 40356731 +DROP TABLE t1; +# +# Test discovery +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +OPTION_LIST='Depth=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `cuisine` char(64) NOT NULL, + `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024 +SELECT * FROM t1 LIMIT 5; +_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 -73.8560769999999991, 40.8484470000000002 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.9617039999999974, 40.6629420000000010 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.9851355999999925, 40.7676919000000026 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.9824199999999905, 40.5795049999999975 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601151999999956, 40.7311739000000017 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 +DROP TABLE t1; +# +# Dropping a column +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +COLIST='{"projection":{"grades":0}}' OPTION_LIST='Driver=C,Version=0,level=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1 LIMIT 10; +_id address borough cuisine name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 (-73.8560769999999991, 40.8484470000000002) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.9617039999999974, 40.6629420000000010) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.9851355999999925, 40.7676919000000026) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.9824199999999905, 40.5795049999999975) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601151999999956, 40.7311739000000017) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803826999999984, 40.7643124000000014) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286000000026, 40.6119571999999991) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068505999999985, 40.6199033999999983) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 
+58ada47de5a51ddfcd5ed524 6409 (-74.0052889999999906, 40.6288860000000014) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482608999999940, 40.6408271000000028) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +DROP TABLE t1; +# +# Specifying Jpath +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(64) NOT NULL, +cuisine CHAR(200) NOT NULL, +borough CHAR(16) NOT NULL, +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1 LIMIT 1; +_id 58ada47de5a51ddfcd5ed51c +name Morris Park Bake Shop +cuisine Bakery +borough Bronx +street Morris Park Ave +building 1007 +zipcode 10462 +grade A +score 2 +date 2014-03-03 +restaurant_id 30075445 +SELECT name, street, score, date FROM t1 LIMIT 5; +name street score date +Morris Park Bake Shop Morris Park Ave 2 2014-03-03 +Wendy'S Flatbush Avenue 8 2014-12-30 +Dj Reynolds Pub And Restaurant West 57 Street 2 2014-09-06 +Riviera Caterer Stillwell Avenue 5 2014-06-10 +Tov Kosher Kitchen 63 Road 20 2014-11-24 +SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10; +name cuisine borough +Morris Park Bake Shop Bakery Bronx +Wendy'S Hamburgers Brooklyn +Dj Reynolds Pub And Restaurant Irish Manhattan +Riviera Caterer American Brooklyn +Kosher Island Jewish/Kosher Staten Island +Wilken'S Fine Food Delicatessen Brooklyn +Regina Caterers American Brooklyn +Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn +Wild Asia American Bronx +C & C Catering Service American Brooklyn +SELECT COUNT(*) FROM t1 WHERE grade = 'A'; +COUNT(*) +20687 +SELECT * FROM t1 WHERE cuisine = 'English'; +_id name cuisine borough street building zipcode grade score date restaurant_id +58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 2014-10-23 40391531 +58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 2014-08-14 40392496 +58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 2014-09-29 40816202 +58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 2014-02-11 41022701 +58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 2014-10-08 41076583 +58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 2014-06-09 41443706 +58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 2014-10-22 41448559 +58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 2014-07-26 41513545 +58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 2014-12-03 41557377 +58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 2015-01-16 41625263 +58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 2014-08-27 41633327 +58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 2014-06-03 41660253 +58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 
2014-08-07 41664704 +58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 2014-12-27 41690534 +58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 2014-10-28 50000290 +58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 2014-08-18 50011097 +SELECT * FROM t1 WHERE score = building; +_id name cuisine borough street building zipcode grade score date restaurant_id +DROP TABLE t1; +# +# Specifying Filter +# +CREATE TABLE t1 ( +_id CHAR(24) NOT NULL, +name CHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +restaurant_id CHAR(8) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT name FROM t1 WHERE borough = 'Queens'; +name +La Baraka Restaurant +Air France Lounge +Tournesol +Winegasm +Cafe Henri +Bistro 33 +Domaine Wine Bar +Cafe Triskell +Cannelle Patisserie +La Vie +Dirty Pierres Bistro +Fresca La Crepe +Bliss 46 Bistro +Bear +Cuisine By Claudette +Paris Baguette +The Baroness Bar +Francis Cafe +Madame Sou Sou +Crepe 'N' Tearia +Aperitif Bayside Llc +DROP TABLE t1; +# +# Testing pipeline +# +CREATE TABLE t1 ( +name VARCHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +date DATETIME NOT NULL, +grade CHAR(1) NOT NULL, +score INT(4) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}' +OPTION_LIST='Driver=C,Version=0,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1 LIMIT 10; +name borough date grade score +Tout Va Bien Manhattan 2014-11-10 01:00:00 B 15 +Tout Va Bien Manhattan 2014-04-03 02:00:00 A 13 +Tout Va Bien Manhattan 2013-07-17 02:00:00 C 36 +Tout Va Bien Manhattan 2013-02-06 01:00:00 B 22 +Tout Va Bien Manhattan 2012-07-16 02:00:00 C 36 +Tout Va Bien Manhattan 2012-03-08 01:00:00 C 7 +La Grenouille Manhattan 2014-04-09 02:00:00 A 10 +La Grenouille Manhattan 2013-03-05 01:00:00 A 9 +La Grenouille Manhattan 2012-02-02 01:00:00 A 13 +Le Perigord Manhattan 2014-07-14 02:00:00 B 14 +SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx'; +name grade score date +Bistro Sk A 10 2014-11-21 01:00:00 +Bistro Sk A 12 2014-02-19 01:00:00 +Bistro Sk B 18 2013-06-12 02:00:00 +DROP TABLE t1; +# +# try level 2 discovery +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +COLIST='{"projection":{"cuisine":0}}' CONNECTION='mongodb://localhost:27017' LRECL=1024 +OPTION_LIST='Driver=C,level=2,version=0'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` double(21,16) DEFAULT NULL `JPATH`='address.coord.0', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 
CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"projection":{"cuisine":0}}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=C,level=2,version=0' `LRECL`=1024 +SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; +name borough address_street score +Le Gamin Brooklyn Vanderbilt Avenue 24 +Bistro 33 Queens Ditmars Boulevard 15 +Dirty Pierres Bistro Queens Station Square 22 +Santos Anne Brooklyn Union Avenue 26 +Le Paddock Brooklyn Prospect Avenue 17 +La Crepe Et La Vie Brooklyn Foster Avenue 24 +Francis Cafe Queens Ditmars Boulevard 19 +DROP TABLE t1; +# +# try CRUD operations +# +false +CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64)) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +DELETE FROM t1; +INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three'); +SELECT * FROM t1; +_id msg +0 NULL +1 One +2 Two +3 Three +UPDATE t1 SET msg = 'Deux' WHERE _id = 2; +DELETE FROM t1 WHERE msg IS NULL; +SELECT * FROM t1; +_id msg +1 One +2 Deux +3 Three +DELETE FROM t1; +DROP TABLE t1; +true +# +# List states whose population is equal or more than 10 millions +# +false +CREATE TABLE t1 ( +_id char(5) NOT NULL, +city char(16) NOT NULL, +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', +pop int(11) NOT NULL, +state char(2) NOT NULL) +ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities' +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET='utf8'; +# Using SQL for grouping +SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC; +state totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +# Using a pipeline for grouping +CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}' +OPTION_LIST='Driver=C,Version=0,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1; +_id totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +true +# +# Test making array +# +CREATE TABLE t1 ( +_id int(4) NOT NULL, +item CHAR(8) NOT NULL, +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8 +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +INSERT INTO t1 VALUES +(1,'journal',87,45,63,12,78), +(2,'notebook',123,456,789,NULL,NULL), +(3,'paper',5,7,3,8,NULL), +(4,'planner',25,71,NULL,44,27), +(5,'postcard',5,7,3,8,NULL); +SELECT * FROM t1; +_id item prices_0 prices_1 prices_2 prices_3 prices_4 +1 journal 87 45 63 12 78 +2 notebook 123 456 789 NULL NULL +3 paper 5 7 3 8 NULL +4 planner 25 71 44 27 NULL +5 postcard 5 7 3 8 NULL +DROP TABLE t1; +# +# Test array aggregation +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}' 
+OPTION_LIST='Driver=C,Version=0,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1; +item total average +journal 285 57.00 +notebook 1368 456.00 +paper 23 5.75 +planner 167 41.75 +postcard 23 5.75 +DROP TABLE t1; +true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/bson_udf.result b/storage/connect/mysql-test/connect/r/bson_udf.result new file mode 100644 index 00000000000..fef55f7d3d9 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson_udf.result @@ -0,0 +1,685 @@ +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=VIR BLOCK_SIZE=5; +# +# Test UDF's with constant arguments +# +SELECT BsonValue(56, 3.1416, 'foo', NULL); +ERROR HY000: Can't initialize function 'bsonvalue'; Cannot accept more than 1 argument +SELECT BsonValue(3.1416); +BsonValue(3.1416) +3.1416 +SELECT BsonValue(-80); +BsonValue(-80) +-80 +SELECT BsonValue('foo'); +BsonValue('foo') +foo +SELECT BsonValue(9223372036854775807); +BsonValue(9223372036854775807) +9223372036854775807 +SELECT BsonValue(NULL); +BsonValue(NULL) +null +SELECT BsonValue(TRUE); +BsonValue(TRUE) +true +SELECT BsonValue(FALSE); +BsonValue(FALSE) +false +SELECT BsonValue(); +BsonValue() +null +SELECT BsonValue('[11, 22, 33]' json_) FROM t1; +BsonValue('[11, 22, 33]' json_) +[11,22,33] +[11,22,33] +[11,22,33] +[11,22,33] +[11,22,33] +SELECT Bson_Make_Array(); +Bson_Make_Array() +[] +SELECT Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL); +Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL) +[56,3.1416,"My name is \"Foo\"",null] +SELECT Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE); +Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE) +[[56,3.1416,"foo"],true] +SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL)) Array; +ERROR HY000: Can't initialize function 'bson_array_add'; This function must have at least 2 arguments +SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Array; +Array +[56,3.1416,"foo",null,"One more"] +SELECT Bson_Array_Add(BsonValue('one value'), 'One more'); +Bson_Array_Add(BsonValue('one value'), 'One more') +["one value","One more"] +SELECT Bson_Array_Add('one value', 'One more'); +Bson_Array_Add('one value', 'One more') +["one value","One more"] +SELECT Bson_Array_Add('one value' json_, 'One more'); +Bson_Array_Add('one value' json_, 'One more') +["one value","One more"] +SELECT Bson_Array_Add(5 json_, 'One more'); +Bson_Array_Add(5 json_, 'One more') +[5,"One more"] +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0); +Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0) +[4,5,3,8,7,9] +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 2) Array; +Array +[5,3,4,8,7,9] +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9); +Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9) +[5,3,8,7,9,4] +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1); +Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1) +[1,2,[11,22],"[2]"] +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1); +Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1) +[1,2,[11,33,22]] +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]'); +Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]') +[1,2,[11,33,22]] +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin', NULL), 'One more', 'Two more') Array; +Array +[56,3.1416,"machin",null,"One more","Two more"] +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 
3.1416, 'machin'), 'One more', 'Two more') Array FROM t1; +Array +[56,3.1416,"machin","One more","Two more"] +[56,3.1416,"machin","One more","Two more"] +[56,3.1416,"machin","One more","Two more"] +[56,3.1416,"machin","One more","Two more"] +[56,3.1416,"machin","One more","Two more"] +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), n) Array FROM t1; +Array +[56,3.1416,"machin",1] +[56,3.1416,"machin",2] +[56,3.1416,"machin",3] +[56,3.1416,"machin",4] +[56,3.1416,"machin",5] +SELECT Bson_Array_Add_Values(Bson_Make_Array(n, 3.1416, 'machin'), n) Array FROM t1; +Array +[1,3.1416,"machin",1] +[2,3.1416,"machin",2] +[3,3.1416,"machin",3] +[4,3.1416,"machin",4] +[5,3.1416,"machin",5] +SELECT Bson_Array_Add_Values('[56]', 3.1416, 'machin') Array; +Array +[56,3.1416,"machin"] +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0); +Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0) +[3.1416,"My name is \"Foo\"",null] +SELECT Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2); +Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2) +{"56":56,"3.1416":3.1416,"My name is Foo":"My name is Foo","NULL":null} +Warnings: +Warning 1105 First argument target is not an array +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2'); +Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2') +[56,3.1416,"My name is \"Foo\"",null] +Warnings: +Warning 1105 Missing or null array index +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2); +Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2) +[56,3.1416,"My name is \"Foo\"",null] +Warnings: +Warning 1105 First argument target is not an array +/* WARNING VOID */ +# +SELECT Bson_Make_Object(56, 3.1416, 'foo', NULL); +Bson_Make_Object(56, 3.1416, 'foo', NULL) +{"56":56,"3.1416":3.1416,"foo":"foo","NULL":null} +SELECT Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty); +Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty) +{"qty":56,"price":3.1416,"truc":"foo","garanty":null} +SELECT Bson_Make_Object(); +Bson_Make_Object() +{} +SELECT Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL); +Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL) +{"Make_Array(56, 3.1416, 'foo')":[56,3.1416,"foo"],"NULL":null} +SELECT Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL); +Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL) +[{"qty":56,"price":3.1416,"foo":"foo"},null] +SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL); +Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL) +{"qty":56,"price":3.1416,"truc":"machin","garanty":null} +SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty'); +ERROR HY000: Can't initialize function 'bson_object_key'; This function must have an even number of arguments +SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color); +Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color) +{"qty":56,"price":3.1416,"truc":"machin","garanty":null,"color":"blue"} +SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price); +Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price) 
+{"qty":56,"price":45.99,"truc":"machin","garanty":null} +SELECT Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1); +Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1) +NULL +Warnings: +Warning 1105 Error 2 opening notexist.json +Warning 1105 No sub-item at '[1]' +SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc'); +Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc') +{"qty":56,"price":3.1416,"garanty":null} +SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose'); +Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose') +{"qty":56,"price":3.1416,"truc":"machin","garanty":null} +SELECT Bson_Object_List(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty)) "Key List"; +Key List +["qty","price","truc","garanty"] +SELECT Bson_Object_List('{"qty":56, "price":3.1416, "truc":"machin", "garanty":null}') "Key List"; +Key List +["qty","price","truc","garanty"] +SELECT Bson_Object_Values('{"One":1,"Two":2,"Three":3}') "Value List"; +Value List +[1,2,3] +# +# Test UDF's with column arguments +# +SELECT Bsonset_Def_Prec(2); +Bsonset_Def_Prec(2) +2 +CREATE TABLE t2 +( +ISBN CHAR(15), +LANG CHAR(2), +SUBJECT CHAR(32), +AUTHOR CHAR(64), +TITLE CHAR(32), +TRANSLATION CHAR(32), +TRANSLATOR CHAR(80), +PUBLISHER CHAR(32), +DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT Bson_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2; +Bson_Make_Array(AUTHOR, TITLE, DATEPUB) +[" Jean-Christophe Bernadac, François Knab","Construire une application XML",1999] +["William J. Pardi","XML en Action",1999] +SELECT Bson_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2; +Bson_Make_Object(AUTHOR, TITLE, DATEPUB) +{"AUTHOR":" Jean-Christophe Bernadac, François Knab","TITLE":"Construire une application XML","DATEPUB":1999} +{"AUTHOR":"William J. 
Pardi","TITLE":"XML en Action","DATEPUB":1999} +SELECT Bson_Array_Grp(TITLE, DATEPUB) FROM t2; +ERROR HY000: Can't initialize function 'bson_array_grp'; This function can only accept 1 argument +SELECT Bson_Array_Grp(TITLE) FROM t2; +Bson_Array_Grp(TITLE) +["Construire une application XML","XML en Action"] +CREATE TABLE t3 ( +SERIALNO CHAR(5) NOT NULL, +NAME VARCHAR(12) NOT NULL FLAG=6, +SEX SMALLINT(1) NOT NULL, +TITLE VARCHAR(15) NOT NULL FLAG=20, +MANAGER CHAR(5) DEFAULT NULL, +DEPARTMENT CHAr(4) NOT NULL FLAG=41, +SECRETARY CHAR(5) DEFAULT NULL FLAG=46, +SALARY DOUBLE(8,2) NOT NULL FLAG=52 +) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 FILE_NAME='employee.dat' ENDING=1; +SELECT Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) FROM t3 WHERE NAME = 'MERCHANT'; +Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) +{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.00} +SELECT DEPARTMENT, Bson_Array_Grp(NAME) FROM t3 GROUP BY DEPARTMENT; +DEPARTMENT Bson_Array_Grp(NAME) +0021 ["STRONG","SHORTSIGHT"] +0318 ["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"] +0319 ["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL"] +2452 ["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"] +Warnings: +Warning 1105 Result truncated to json_grp_size values +SELECT BsonSet_Grp_Size(30); +BsonSet_Grp_Size(30) +30 +SELECT Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) from t3 GROUP BY title; +Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) +{"title":"ADMINISTRATOR","names":["GOOSEPEN","FUNNIGUY","SHRINKY"]} +{"title":"DIRECTOR","names":["QUINN","WERTHER","STRONG"]} +{"title":"ENGINEER","names":["BROWNY","ORELLY","MARTIN","TONGHO","WALTER","SMITH"]} +{"title":"PROGRAMMER","names":["BUGHAPPY"]} +{"title":"SALESMAN","names":["WHEELFOR","MERCHANT","BULLOZER","BANCROFT","FODDERMAN"]} +{"title":"SCIENTIST","names":["BIGHEAD","BIGHORN"]} +{"title":"SECRETARY","names":["MESSIFUL","HONEY","SHORTSIGHT","CHERRY","MONAPENNY"]} +{"title":"TYPIST","names":["KITTY","PLUMHEAD"]} +SELECT Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) FROM t3 GROUP BY DEPARTMENT; +Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) +["0021",["STRONG","SHORTSIGHT"]] +["0318",["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]] +["0319",["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL","GOOSEPEN"]] +["2452",["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]] +SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) FROM t3 GROUP BY DEPARTMENT; +Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) +{"DEPARTMENT":"0021","NAMES":["STRONG","SHORTSIGHT"]} +{"DEPARTMENT":"0318","NAMES":["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]} +{"DEPARTMENT":"0319","NAMES":["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL","GOOSEPEN"]} +{"DEPARTMENT":"2452","NAMES":["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]} +SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT; +Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) 
+{"DEPARTMENT":"0021","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","TITLE":"DIRECTOR","SALARY":23000.00},{"SERIALNO":"22222","NAME":"SHORTSIGHT","TITLE":"SECRETARY","SALARY":5500.00}]} +{"DEPARTMENT":"0318","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","TITLE":"SALESMAN","SALARY":9600.00},{"SERIALNO":"24888","NAME":"PLUMHEAD","TITLE":"TYPIST","SALARY":2800.00},{"SERIALNO":"27845","NAME":"HONEY","TITLE":"SECRETARY","SALARY":4900.00},{"SERIALNO":"73452","NAME":"TONGHO","TITLE":"ENGINEER","SALARY":6800.00},{"SERIALNO":"74234","NAME":"WALTER","TITLE":"ENGINEER","SALARY":7400.00},{"SERIALNO":"77777","NAME":"SHRINKY","TITLE":"ADMINISTRATOR","SALARY":7500.00},{"SERIALNO":"70012","NAME":"WERTHER","TITLE":"DIRECTOR","SALARY":14500.00},{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.00},{"SERIALNO":"73111","NAME":"WHEELFOR","TITLE":"SALESMAN","SALARY":10030.00}]} +{"DEPARTMENT":"0319","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","TITLE":"SALESMAN","SALARY":14800.00},{"SERIALNO":"40567","NAME":"QUINN","TITLE":"DIRECTOR","SALARY":14000.00},{"SERIALNO":"00137","NAME":"BROWNY","TITLE":"ENGINEER","SALARY":10500.00},{"SERIALNO":"12345","NAME":"KITTY","TITLE":"TYPIST","SALARY":3000.45},{"SERIALNO":"33333","NAME":"MONAPENNY","TITLE":"SECRETARY","SALARY":3800.00},{"SERIALNO":"00023","NAME":"MARTIN","TITLE":"ENGINEER","SALARY":10000.00},{"SERIALNO":"07654","NAME":"FUNNIGUY","TITLE":"ADMINISTRATOR","SALARY":8500.00},{"SERIALNO":"45678","NAME":"BUGHAPPY","TITLE":"PROGRAMMER","SALARY":8500.00},{"SERIALNO":"56789","NAME":"FODDERMAN","TITLE":"SALESMAN","SALARY":7000.00},{"SERIALNO":"55555","NAME":"MESSIFUL","TITLE":"SECRETARY","SALARY":5000.50},{"SERIALNO":"98765","NAME":"GOOSEPEN","TITLE":"ADMINISTRATOR","SALARY":4700.00}]} +{"DEPARTMENT":"2452","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","TITLE":"SCIENTIST","SALARY":8000.00},{"SERIALNO":"31416","NAME":"ORELLY","TITLE":"ENGINEER","SALARY":13400.00},{"SERIALNO":"36666","NAME":"BIGHORN","TITLE":"SCIENTIST","SALARY":11000.00},{"SERIALNO":"02345","NAME":"SMITH","TITLE":"ENGINEER","SALARY":9000.00},{"SERIALNO":"11111","NAME":"CHERRY","TITLE":"SECRETARY","SALARY":4500.00}]} +SELECT Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT, TITLE; +Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) +{"DEPARTMENT":"0021","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","SALARY":23000.00}]} +{"DEPARTMENT":"0021","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"22222","NAME":"SHORTSIGHT","SALARY":5500.00}]} +{"DEPARTMENT":"0318","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"77777","NAME":"SHRINKY","SALARY":7500.00}]} +{"DEPARTMENT":"0318","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"70012","NAME":"WERTHER","SALARY":14500.00}]} +{"DEPARTMENT":"0318","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"73452","NAME":"TONGHO","SALARY":6800.00},{"SERIALNO":"74234","NAME":"WALTER","SALARY":7400.00}]} +{"DEPARTMENT":"0318","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","SALARY":9600.00},{"SERIALNO":"78943","NAME":"MERCHANT","SALARY":8700.00},{"SERIALNO":"73111","NAME":"WHEELFOR","SALARY":10030.00}]} +{"DEPARTMENT":"0318","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"27845","NAME":"HONEY","SALARY":4900.00}]} +{"DEPARTMENT":"0318","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"24888","NAME":"PLUMHEAD","SALARY":2800.00}]} 
+{"DEPARTMENT":"0319","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"98765","NAME":"GOOSEPEN","SALARY":4700.00},{"SERIALNO":"07654","NAME":"FUNNIGUY","SALARY":8500.00}]} +{"DEPARTMENT":"0319","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"40567","NAME":"QUINN","SALARY":14000.00}]} +{"DEPARTMENT":"0319","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"00023","NAME":"MARTIN","SALARY":10000.00},{"SERIALNO":"00137","NAME":"BROWNY","SALARY":10500.00}]} +{"DEPARTMENT":"0319","TITLE":"PROGRAMMER","EMPLOYES":[{"SERIALNO":"45678","NAME":"BUGHAPPY","SALARY":8500.00}]} +{"DEPARTMENT":"0319","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","SALARY":14800.00},{"SERIALNO":"56789","NAME":"FODDERMAN","SALARY":7000.00}]} +{"DEPARTMENT":"0319","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"33333","NAME":"MONAPENNY","SALARY":3800.00},{"SERIALNO":"55555","NAME":"MESSIFUL","SALARY":5000.50}]} +{"DEPARTMENT":"0319","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"12345","NAME":"KITTY","SALARY":3000.45}]} +{"DEPARTMENT":"2452","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"31416","NAME":"ORELLY","SALARY":13400.00},{"SERIALNO":"02345","NAME":"SMITH","SALARY":9000.00}]} +{"DEPARTMENT":"2452","TITLE":"SCIENTIST","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","SALARY":8000.00},{"SERIALNO":"36666","NAME":"BIGHORN","SALARY":11000.00}]} +{"DEPARTMENT":"2452","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"11111","NAME":"CHERRY","SALARY":4500.00}]} +SELECT Bson_Object_Grp(SALARY) FROM t3; +ERROR HY000: Can't initialize function 'bson_object_grp'; This function requires 2 arguments (key, value) +SELECT Bson_Object_Grp(NAME, SALARY) FROM t3; +Bson_Object_Grp(NAME, SALARY) +{"BANCROFT":9600.00,"SMITH":9000.00,"MERCHANT":8700.00,"FUNNIGUY":8500.00,"BUGHAPPY":8500.00,"BIGHEAD":8000.00,"SHRINKY":7500.00,"WALTER":7400.00,"FODDERMAN":7000.00,"TONGHO":6800.00,"SHORTSIGHT":5500.00,"MESSIFUL":5000.50,"HONEY":4900.00,"GOOSEPEN":4700.00,"CHERRY":4500.00,"MONAPENNY":3800.00,"KITTY":3000.45,"PLUMHEAD":2800.00,"STRONG":23000.00,"BULLOZER":14800.00,"WERTHER":14500.00,"QUINN":14000.00,"ORELLY":13400.00,"BIGHORN":11000.00,"BROWNY":10500.00,"WHEELFOR":10030.00,"MARTIN":10000.00} +SELECT Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") FROM t3 GROUP BY DEPARTMENT; +Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") +{"DEPARTMENT":"0021","SALARIES":{"STRONG":23000.00,"SHORTSIGHT":5500.00}} +{"DEPARTMENT":"0318","SALARIES":{"BANCROFT":9600.00,"PLUMHEAD":2800.00,"HONEY":4900.00,"TONGHO":6800.00,"WALTER":7400.00,"SHRINKY":7500.00,"WERTHER":14500.00,"MERCHANT":8700.00,"WHEELFOR":10030.00}} +{"DEPARTMENT":"0319","SALARIES":{"BULLOZER":14800.00,"QUINN":14000.00,"BROWNY":10500.00,"KITTY":3000.45,"MONAPENNY":3800.00,"MARTIN":10000.00,"FUNNIGUY":8500.00,"BUGHAPPY":8500.00,"FODDERMAN":7000.00,"MESSIFUL":5000.50,"GOOSEPEN":4700.00}} +{"DEPARTMENT":"2452","SALARIES":{"BIGHEAD":8000.00,"ORELLY":13400.00,"BIGHORN":11000.00,"SMITH":9000.00,"CHERRY":4500.00}} +SELECT Bson_Array_Grp(NAME) FROM t3; +Bson_Array_Grp(NAME) +["BANCROFT","SMITH","MERCHANT","FUNNIGUY","BUGHAPPY","BIGHEAD","SHRINKY","WALTER","FODDERMAN","TONGHO","SHORTSIGHT","MESSIFUL","HONEY","GOOSEPEN","CHERRY","MONAPENNY","KITTY","PLUMHEAD","STRONG","BULLOZER","WERTHER","QUINN","ORELLY","BIGHORN","BROWNY","WHEELFOR","MARTIN"] +SELECT Bson_Object_Key(name, title) FROM t3 WHERE DEPARTMENT = 318; +Bson_Object_Key(name, title) +{"BANCROFT":"SALESMAN"} +{"MERCHANT":"SALESMAN"} +{"SHRINKY":"ADMINISTRATOR"} +{"WALTER":"ENGINEER"} 
+{"TONGHO":"ENGINEER"} +{"HONEY":"SECRETARY"} +{"PLUMHEAD":"TYPIST"} +{"WERTHER":"DIRECTOR"} +{"WHEELFOR":"SALESMAN"} +SELECT Bson_Object_Grp(name, title) FROM t3 WHERE DEPARTMENT = 318; +Bson_Object_Grp(name, title) +{"BANCROFT":"SALESMAN","MERCHANT":"SALESMAN","SHRINKY":"ADMINISTRATOR","WALTER":"ENGINEER","TONGHO":"ENGINEER","HONEY":"SECRETARY","PLUMHEAD":"TYPIST","WERTHER":"DIRECTOR","WHEELFOR":"SALESMAN"} +# +# Test value getting UDF's +# +SELECT BsonGet_String(Bson_Array_Grp(name),'[#]') FROM t3; +BsonGet_String(Bson_Array_Grp(name),'[#]') +27 +SELECT BsonGet_String(Bson_Array_Grp(name),'[","]') FROM t3; +BsonGet_String(Bson_Array_Grp(name),'[","]') +BANCROFT,SMITH,MERCHANT,FUNNIGUY,BUGHAPPY,BIGHEAD,SHRINKY,WALTER,FODDERMAN,TONGHO,SHORTSIGHT,MESSIFUL,HONEY,GOOSEPEN,CHERRY,MONAPENNY,KITTY,PLUMHEAD,STRONG,BULLOZER,WERTHER,QUINN,ORELLY,BIGHORN,BROWNY,WHEELFOR,MARTIN +SELECT BsonGet_String(Bson_Array_Grp(name),'[>]') FROM t3; +BsonGet_String(Bson_Array_Grp(name),'[>]') +WHEELFOR +SET @j1 = '[45,28,36,45,89]'; +SELECT BsonGet_String(@j1,'1'); +BsonGet_String(@j1,'1') +28 +SELECT BsonGet_String(@j1 json_,'3'); +BsonGet_String(@j1 json_,'3') +45 +SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3'); +BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3') +45 +SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",BsonGet_String(Bson_Make_Array(45,28,36,45,89),'[+]') "sum"; +list egal sum +45+28+36+45+89 = 243 +SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0'); +BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0') +36 +SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*'); +BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*') +[36,45,89] +SELECT BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc'); +BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc') +machin +SET @j2 = '{"qty":56,"price":3.141600,"truc":"machin","garanty":null}'; +SELECT BsonGet_String(@j2 json_,'truc'); +BsonGet_String(@j2 json_,'truc') +machin +SELECT BsonGet_String(@j2,'truc'); +BsonGet_String(@j2,'truc') +machin +SELECT BsonGet_String(@j2,'chose'); +BsonGet_String(@j2,'chose') +NULL +SELECT BsonGet_String(NULL json_, NULL); +BsonGet_String(NULL json_, NULL) +NULL +Warnings: +Warning 1105 +/* NULL WARNING */ +SELECT department, BsonGet_String(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department; +department Sumsal +0021 28500.00 +0318 72230.00 +0319 89800.95 +2452 45900.00 +SELECT BsonGet_Int(@j1, '4'); +BsonGet_Int(@j1, '4') +89 +SELECT BsonGet_Int(@j1, '[#]'); +BsonGet_Int(@j1, '[#]') +5 +SELECT BsonGet_Int(@j1, '[+]'); +BsonGet_Int(@j1, '[+]') +243 +SELECT BsonGet_Int(@j1 json_, '3'); +BsonGet_Int(@j1 json_, '3') +45 +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3'); +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3') +45 +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]'); +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]') +45 +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]'); +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') +243 +SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0'); +BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0') +36 +SELECT 
BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1'); +BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1') +28 +SELECT BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty'); +BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty') +56 +SELECT BsonGet_Int(@j2 json_, 'price'); +BsonGet_Int(@j2 json_, 'price') +3 +SELECT BsonGet_Int(@j2, 'qty'); +BsonGet_Int(@j2, 'qty') +56 +SELECT BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose'); +BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose') +NULL +SELECT BsonGet_Int(BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)), '1.*'), '[+]') sum; +sum +170 +SELECT department, BsonGet_Int(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"), 'salaries.[+]') Sumsal FROM t3 GROUP BY department; +department Sumsal +0021 28500 +0318 72230 +0319 89800 +2452 45900 +SELECT BsonGet_Real(@j1, '2'); +BsonGet_Real(@j1, '2') +36.000000000000000 +SELECT BsonGet_Real(@j1 json_, '3', 2); +BsonGet_Real(@j1 json_, '3', 2) +45.00 +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3'); +BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3') +45.000000000000000 +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]'); +BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]') +45.000000000000000 +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]'); +BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]') +243.000000000000000 +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]'); +BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]') +48.600000000000000 +SELECT BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0'); +BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0') +36.000000000000000 +SELECT BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price'); +BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price') +3.141600000000000 +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty'); +BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty') +56.000000000000000 +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price'); +BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price') +3.141600000000000 +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4); +BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4) +3.1416 +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose'); +BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose') +NULL +SELECT department, BsonGet_Real(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department; +department Sumsal +0021 28500.000000000000000 +0318 72230.000000000000000 +0319 89800.950000000000000 +2452 45900.000000000000000 +# +# Documentation examples +# +SELECT +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '4') "Rank", +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[#]') "Number", +BsonGet_String(Bson_Make_Array(45,28,36,45,89), '[","]') "Concat", +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') "Sum", 
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]', 2) "Avg"; +Rank Number Concat Sum Avg +89 5 45,28,36,45,89 243 48.60 +SELECT +BsonGet_String('{"qty":7,"price":29.50,"garanty":null}', 'price') "String", +BsonGet_Int('{"qty":7,"price":29.50,"garanty":null}', 'price') "Int", +BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price') "Real"; +String Int Real +29.50 29 29.500000000000000 +SELECT BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price', 3) "Real"; +Real +29.500 +# +# Testing Locate +# +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin'); +BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin') +$.truc +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56); +BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56) +$.qty +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416); +BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416) +$.price +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose'); +BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose') +NULL +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'Jack') Path; +Path +$.AUTHORS[1].FN +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'jack' ci) Path; +Path +$.AUTHORS[1].FN +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"Jack", "LN":"London"}' json_) Path; +Path +$.AUTHORS[1] +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"jack", "LN":"London"}' json_) Path; +Path +NULL +SELECT BsonLocate('[45,28,36,45,89]',36); +BsonLocate('[45,28,36,45,89]',36) +$[2] +SELECT BsonLocate('[45,28,36,45,89]' json_,28.0); +BsonLocate('[45,28,36,45,89]' json_,28.0) +NULL +SELECT Bson_Locate_All('[45,28,36,45,89]',10); +Bson_Locate_All('[45,28,36,45,89]',10) +[] +SELECT Bson_Locate_All('[45,28,36,45,89]',45); +Bson_Locate_All('[45,28,36,45,89]',45) +["$[0]","$[3]"] +SELECT Bson_Locate_All('[[45,28],36,45,89]',45); +Bson_Locate_All('[[45,28],36,45,89]',45) +["$[0][0]","$[2]"] +SELECT Bson_Locate_All('[[45,28,45],36,45,89]',45); +Bson_Locate_All('[[45,28,45],36,45,89]',45) +["$[0][0]","$[0][2]","$[2]"] +SELECT Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]')); +Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]')) +["$[0][0]","$[0][2]","$[2]"] +SELECT BsonLocate('[[45,28,45],36,45,89]',45,n) from t1; +BsonLocate('[[45,28,45],36,45,89]',45,n) +$[0][0] +$[0][2] +$[2] +NULL +NULL +SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) FROM t1; +BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) +$[0][0] +$[0][2] +$[2] +NULL +NULL +SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) AS `Path` FROM t1 GROUP BY n HAVING `Path` IS NOT NULL; +Path +$[0][0] +$[0][2] +$[2] +SELECT Bson_Locate_All('[45,28,[36,45,89]]',45); +Bson_Locate_All('[45,28,[36,45,89]]',45) +["$[0]","$[2][1]"] +SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0)); +Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0)) +[] +SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0); +Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0) +["$[1][1]"] 
+SELECT BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_); +BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_) +$[1] +SELECT BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_); +BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_) +$[0] +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','45') "All paths"; +All paths +[] +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_); +Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_) +["$[1][0]"] +SELECT BsonGet_Int(Bson_Locate_All('[[45,28],[[36,45],89]]',45), '[#]') "Nb of occurs"; +Nb of occurs +2 +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]',45,2); +Bson_Locate_All('[[45,28],[[36,45],89]]',45,2) +["$[0][0]"] +SELECT BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0'); +BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0') +$[0] +SELECT BsonLocate(Bson_File('test/biblio.json'), 'Knab'); +BsonLocate(Bson_File('test/biblio.json'), 'Knab') +$[0].AUTHOR[1].LASTNAME +SELECT Bson_Locate_All('test/biblio.json' jfile_, 'Knab'); +Bson_Locate_All('test/biblio.json' jfile_, 'Knab') +["$[0].AUTHOR[1].LASTNAME"] +# +# Testing json files +# +SELECT Bfile_Make('[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]}, +{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]}, +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]}, +{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]', 'test/fx.json', 0) AS NewFile; +NewFile +test/fx.json +SELECT Bfile_Make('test/fx.json', 1); +Bfile_Make('test/fx.json', 1) +test/fx.json +SELECT Bfile_Make('test/fx.json' jfile_); +Bfile_Make('test/fx.json' jfile_) +test/fx.json +SELECT Bfile_Make(Bbin_File('test/fx.json'), 0); +Bfile_Make(Bbin_File('test/fx.json'), 0) +test/fx.json +SELECT Bson_File('test/fx.json', 1); +Bson_File('test/fx.json', 1) +[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}] +Warnings: +Warning 1105 File pretty format doesn't match the specified pretty value +SELECT Bson_File('test/fx.json', 2); +Bson_File('test/fx.json', 2) +[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}] +Warnings: +Warning 1105 File pretty format doesn't match the specified pretty value +SELECT Bson_File('test/fx.json', 0); +Bson_File('test/fx.json', 0) +[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}] +SELECT Bson_File('test/fx.json', '0'); +Bson_File('test/fx.json', '0') +{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]} +SELECT Bson_File('test/fx.json', '[?]'); +Bson_File('test/fx.json', '[?]') +NULL +Warnings: +Warning 1105 Invalid function specification ? 
+SELECT BsonGet_String(Bson_File('test/fx.json'), '1.*'); +BsonGet_String(Bson_File('test/fx.json'), '1.*') +{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]} +SELECT BsonGet_String(Bson_File('test/fx.json'), '1'); +BsonGet_String(Bson_File('test/fx.json'), '1') +6 car roadster 56000 (6, 9) +SELECT BsonGet_Int(Bson_File('test/fx.json'), '1.mileage') AS Mileage; +Mileage +56000 +SELECT BsonGet_Real(Bson_File('test/fx.json'), '0.price', 2) AS Price; +Price +5.65 +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings'); +Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings') +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4,6]} +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings'); +Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings') +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,6,4]} +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1); +Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1) +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,6,4]} +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0); +Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0) +[6,2,4] +SELECT Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1); +Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1) +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2]} +SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin); +Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin) +{"_id":7,"type":"food","item":"meat","origin":"france","ratings":[2,4]} +SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size'); +Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size') +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]} +Warnings: +Warning 1105 No sub-item at 'size' +SELECT Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size'); +Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size') +{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":70},"ratings":[5,8,7]} +SELECT Bson_Object_List(Bson_File('test/fx.json', '3.size')); +Bson_Object_List(Bson_File('test/fx.json', '3.size')) +["W","L","H"] +# +# Testing new functions +# +SELECT Bson_Item_Merge('["a","b","c"]','["d","e","f"]') as "Result"; +Result +["a","b","c","d","e","f"] +SELECT Bson_Item_Merge(Bson_Make_Array('a','b','c'), Bson_Make_Array('d','e','f')) as "Result"; +Result +["a","b","c","d","e","f"] +SELECT +Bson_Set_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Set", +Bson_Insert_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Insert", +Bson_Update_Item(Bson_Make_Array(1,2,3,Bson_Object_Key('quatre',4)),'foo','$[1]',5,'$[3].cinq') "Update"; +Set Insert Update +[1,"foo",3,{"quatre":4,"cinq":5}] [1,2,3,{"quatre":4,"cinq":5}] [1,"foo",3,{"quatre":4}] +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux'); +bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux') +[1,3,{"quatre":4}] +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]'); +bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]') +[1,3,{"quatre":4}] +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux'); +bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux') +[1,2,3,{"quatre":4}] +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +SELECT BsonSet_Grp_Size(10); 
+BsonSet_Grp_Size(10) +10 diff --git a/storage/connect/mysql-test/connect/r/jdbc_oracle.result b/storage/connect/mysql-test/connect/r/jdbc_oracle.result index ec314c5f072..d895a9aed87 100644 --- a/storage/connect/mysql-test/connect/r/jdbc_oracle.result +++ b/storage/connect/mysql-test/connect/r/jdbc_oracle.result @@ -3,7 +3,7 @@ command varchar(128) not null, number int(5) not null flag=1, message varchar(255) flag=2) ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager,Execsrc=1'; +OPTION_LIST='User=system,Password=Choupy01,Execsrc=1'; SELECT * FROM t2 WHERE command = 'drop table employee'; command number message drop table employee 0 Execute: java.sql.SQLSyntaxErrorException: ORA-00942: table or view does not exist @@ -23,14 +23,14 @@ Warnings: Warning 1105 Affected rows CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager'; +OPTION_LIST='User=system,Password=Choupy01'; SELECT * FROM t1 WHERE table_name='employee'; Table_Cat Table_Schema Table_Name Table_Type Remark NULL SYSTEM EMPLOYEE TABLE NULL DROP TABLE t1; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager'; +OPTION_LIST='User=system,Password=Choupy01'; SELECT * FROM t1; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks NULL SYSTEM EMPLOYEE ID 3 NUMBER 38 0 0 10 0 NULL @@ -42,7 +42,7 @@ CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OP HOST 'jdbc:oracle:thin:@localhost:1521:xe', DATABASE 'SYSTEM', USER 'system', -PASSWORD 'manager', +PASSWORD 'Choupy01', PORT 0, SOCKET '', OWNER 'SYSTEM'); diff --git a/storage/connect/mysql-test/connect/r/json.result b/storage/connect/mysql-test/connect/r/json.result index 6b6f40d2c47..dc527acd4a3 100644 --- a/storage/connect/mysql-test/connect/r/json.result +++ b/storage/connect/mysql-test/connect/r/json.result @@ -15,7 +15,7 @@ DATEPUB int(4) ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB -9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Jean-Christophe Bernadac, François Knab Construire une application XML NULL NULL Eyrolles Paris 1999 9782840825685 fr applications William J. 
Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999 DROP TABLE t1; # @@ -24,15 +24,15 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), -Language CHAR(2) FIELD_FORMAT='$.LANG', -Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', -Authors INT(2) FIELD_FORMAT='$.AUTHOR[#]', -Title CHAR(32) FIELD_FORMAT='$.TITLE', -Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', -Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', -Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', -Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', -Year int(4) FIELD_FORMAT='$.DATEPUB' +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +Authors INT(2) JPATH='$.AUTHOR[#]', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -46,16 +46,16 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), -Language CHAR(2) FIELD_FORMAT='$.LANG', -Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', -AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].FIRSTNAME', -AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].LASTNAME', -Title CHAR(32) FIELD_FORMAT='$.TITLE', -Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', -Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', -Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', -Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', -Year int(4) FIELD_FORMAT='$.DATEPUB' +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -69,16 +69,16 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), -Language CHAR(2) FIELD_FORMAT='$.LANG', -Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', -AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME', -AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME', -Title CHAR(32) FIELD_FORMAT='$.TITLE', -Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', -Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', -Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', -Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', -Year int(4) FIELD_FORMAT='$.DATEPUB' +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -176,17 +176,17 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15) NOT NULL, -Language CHAR(2) FIELD_FORMAT='$.LANG', -Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', -AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME', -AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME', -Title CHAR(32) FIELD_FORMAT='$.TITLE', -Translation CHAR(32) FIELD_FORMAT='$.TRANSLATED.PREFIX', -TranslatorFN CHAR(80) 
FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.FIRSTNAME', -TranslatorLN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.LASTNAME', -Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', -Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', -Year int(4) FIELD_FORMAT='$.DATEPUB', +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', +TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', +TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB', INDEX IX(ISBN) ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; @@ -209,9 +209,9 @@ DROP TABLE t1; # CREATE TABLE t1 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[].EXPENSE["+"].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[].EXPENSE[+].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; WHO WEEK WHAT AMOUNT @@ -230,9 +230,9 @@ DROP TABLE t1; # CREATE TABLE t1 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; WHO WEEK WHAT AMOUNT @@ -266,14 +266,14 @@ DROP TABLE t1; # CREATE TABLE t1 ( WHO CHAR(12) NOT NULL, -WEEKS CHAR(12) NOT NULL FIELD_FORMAT='$.WEEK[", "].NUMBER', -SUMS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[+].AMOUNT', -SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[+].AMOUNT', -AVGS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[!].AMOUNT', -SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[!].AMOUNT', -AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[+].AMOUNT', -AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[!].AMOUNT', -AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[*].AMOUNT') +WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE @@ -286,9 +286,9 @@ DROP TABLE t1; # CREATE TABLE t2 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[0].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON 
FILE_NAME='expense.json'; SELECT * FROM t2; WHO WEEK WHAT AMOUNT @@ -302,9 +302,9 @@ Janet 3 Food 18.00 Janet 3 Beer 18.00 CREATE TABLE t3 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[1].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t3; WHO WEEK WHAT AMOUNT @@ -318,9 +318,9 @@ Beth 4 Beer 15.00 Janet 4 Car 17.00 CREATE TABLE t4 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[2].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t4; WHO WEEK WHAT AMOUNT @@ -374,8 +374,8 @@ DROP TABLE t1, t2, t3, t4; CREATE TABLE t2 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json'; SELECT * FROM t2; WHO WEEK WHAT AMOUNT @@ -390,8 +390,8 @@ Janet 3 Beer 18.00 CREATE TABLE t3 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json'; SELECT * FROM t3; WHO WEEK WHAT AMOUNT @@ -406,8 +406,8 @@ Janet 4 Car 17.00 CREATE TABLE t4 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json'; SELECT * FROM t4; WHO WEEK WHAT AMOUNT @@ -425,8 +425,8 @@ Janet 5 Food 12.00 CREATE TABLE t1 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1; SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; WHO WEEK WHAT AMOUNT @@ -461,8 +461,8 @@ DROP TABLE t1; CREATE TABLE t1 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json'; ALTER TABLE t1 PARTITION BY LIST COLUMNS(WEEK) ( diff --git a/storage/connect/mysql-test/connect/r/json_java_2.result b/storage/connect/mysql-test/connect/r/json_java_2.result index 47fc4abbd28..e0b08889f40 100644 --- a/storage/connect/mysql-test/connect/r/json_java_2.result +++ b/storage/connect/mysql-test/connect/r/json_java_2.result @@ -1,8 +1,9 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=JSON 
TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096 OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +16,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; +OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath _id 1 CHAR 24 24 0 0 _id @@ -27,7 +28,7 @@ borough 1 CHAR 13 13 0 0 cuisine 1 CHAR 64 64 0 0 grades_date 1 CHAR 1024 1024 0 1 grades.0.date grades_grade 1 CHAR 14 14 0 1 grades.0.grade -grades_score 5 BIGINT 2 2 0 1 grades.0.score +grades_score 7 INTEGER 2 2 0 1 grades.0.score name 1 CHAR 98 98 0 0 restaurant_id 1 CHAR 8 8 0 0 DROP TABLE t1; @@ -60,7 +61,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants -OPTION_LIST='Level=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -73,17 +74,17 @@ t1 CREATE TABLE `t1` ( `cuisine` char(64) NOT NULL, `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096 +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096 SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 
40.579505 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068 DROP TABLE t1; # # Dropping a column @@ -92,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET= COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=2,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096; SELECT * FROM t1 LIMIT 10; _id address borough cuisine name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856077 40.848447 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.961704 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 40.7676919 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 40.579505 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.8601152 40.7311739 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 -58ada47de5a51ddfcd5ed521 8825 -73.8803827 40.7643124 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 -58ada47de5a51ddfcd5ed522 2206 -74.1377286 40.6119572 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 -58ada47de5a51ddfcd5ed523 7114 -73.9068506 40.6199034 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 -58ada47de5a51ddfcd5ed524 6409 -74.00528899999999 40.628886 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 -58ada47de5a51ddfcd5ed525 1839 -73.9482609 40.6408271 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 DROP TABLE t1; # # Specifying Jpath @@ -111,12 +112,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` 
DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; @@ -259,7 +260,7 @@ t1 CREATE TABLE `t1` ( `borough` char(13) NOT NULL, `grades_date` char(24) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=2' `LRECL`=4096 @@ -305,8 +306,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities' @@ -344,11 +345,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/json_java_3.result b/storage/connect/mysql-test/connect/r/json_java_3.result index 720c82cd7f9..b9ba919507d 100644 --- a/storage/connect/mysql-test/connect/r/json_java_3.result +++ b/storage/connect/mysql-test/connect/r/json_java_3.result @@ -1,8 +1,9 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096 OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +16,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; +OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath _id 1 CHAR 24 24 0 0 _id @@ -27,7 +28,7 @@ borough 1 CHAR 13 13 0 0 cuisine 1 CHAR 64 64 0 0 grades_date 1 CHAR 1024 1024 0 1 grades.0.date grades_grade 1 CHAR 14 14 0 1 grades.0.grade -grades_score 
5 BIGINT 2 2 0 1 grades.0.score +grades_score 7 INTEGER 2 2 0 1 grades.0.score name 1 CHAR 98 98 0 0 restaurant_id 1 CHAR 8 8 0 0 DROP TABLE t1; @@ -60,7 +61,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants -OPTION_LIST='Level=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -73,17 +74,17 @@ t1 CREATE TABLE `t1` ( `cuisine` char(64) NOT NULL, `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096 +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096 SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 DROP TABLE t1; # # Dropping a column @@ -92,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET= COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=3,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096; SELECT * FROM t1 LIMIT 10; _id address borough cuisine name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856077 40.848447 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.961704 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 40.7676919 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 40.579505 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.8601152 40.7311739 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 -58ada47de5a51ddfcd5ed521 8825 -73.8803827 40.7643124 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 -58ada47de5a51ddfcd5ed522 2206 -74.1377286 40.6119572 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 -58ada47de5a51ddfcd5ed523 7114 -73.9068506 40.6199034 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 -58ada47de5a51ddfcd5ed524 6409 -74.00528899999999 40.628886 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 -58ada47de5a51ddfcd5ed525 1839 -73.9482609 40.6408271 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 DROP TABLE t1; # # Specifying Jpath @@ -111,12 +112,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; @@ -259,7 +260,7 @@ t1 CREATE TABLE `t1` ( `borough` char(13) NOT NULL, `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' 
`TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=3' `LRECL`=4096 @@ -305,8 +306,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities' @@ -344,11 +345,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/json_mongo_c.result b/storage/connect/mysql-test/connect/r/json_mongo_c.result index f9bfc01763e..482ccc85b57 100644 --- a/storage/connect/mysql-test/connect/r/json_mongo_c.result +++ b/storage/connect/mysql-test/connect/r/json_mongo_c.result @@ -1,8 +1,9 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=1024 OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +16,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024; +OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath _id 1 CHAR 24 24 0 0 _id @@ -27,7 +28,7 @@ borough 1 CHAR 13 13 0 0 cuisine 1 CHAR 64 64 0 0 grades_date 1 CHAR 1024 1024 0 1 grades.0.date grades_grade 1 CHAR 14 14 0 1 grades.0.grade -grades_score 5 BIGINT 2 2 0 1 grades.0.score +grades_score 7 INTEGER 2 2 0 1 grades.0.score name 1 CHAR 98 98 0 0 restaurant_id 1 CHAR 8 8 0 0 DROP TABLE t1; @@ -60,7 +61,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants -OPTION_LIST='Level=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -73,17 +74,17 @@ t1 CREATE TABLE `t1` ( `cuisine` char(64) NOT NULL, `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) 
ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024 +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024 SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089, 40.848447000000000173 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745, 40.66294200000000103 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451, 40.767691900000002647 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523, 40.579504999999997494 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639, 40.731173900000001709 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 DROP TABLE t1; # # Dropping a column @@ -92,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET= COLIST='{"projection":{"grades":0}}' OPTION_LIST='Driver=C,Version=0,level=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; SELECT * FROM t1 LIMIT 10; _id address borough cuisine name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 40.848447000000000173 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 40.66294200000000103 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 40.767691900000002647 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 40.579504999999997494 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 40.731173900000001709 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 -58ada47de5a51ddfcd5ed521 8825 -73.880382699999998408 40.764312400000001446 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 -58ada47de5a51ddfcd5ed522 2206 -74.137728600000002643 40.611957199999999091 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 -58ada47de5a51ddfcd5ed523 7114 -73.906850599999998508 40.619903399999998328 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 -58ada47de5a51ddfcd5ed524 6409 
-74.005288999999990551 40.628886000000001388 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 -58ada47de5a51ddfcd5ed525 1839 -73.948260899999993967 40.640827100000002758 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +58ada47de5a51ddfcd5ed51c 1007 (-73.856076999999999089, 40.848447000000000173) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.96170399999999745, 40.66294200000000103) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.985135599999992451, 40.767691900000002647) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.982419999999990523, 40.579504999999997494) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.860115199999995639, 40.731173900000001709) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.880382699999998408, 40.764312400000001446) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.137728600000002643, 40.611957199999999091) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.906850599999998508, 40.619903399999998328) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.005288999999990551, 40.628886000000001388) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.948260899999993967, 40.640827100000002758) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 DROP TABLE t1; # # Specifying Jpath @@ -111,12 +112,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; @@ -259,7 +260,7 @@ t1 CREATE TABLE `t1` ( `borough` char(13) NOT NULL, `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"projection":{"cuisine":0}}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=C,level=2,version=0' `LRECL`=1024 @@ -305,8 +306,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) 
NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities' @@ -344,11 +345,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/json_udf.result b/storage/connect/mysql-test/connect/r/json_udf.result index 09544bb1ecb..8315fc3f3bf 100644 --- a/storage/connect/mysql-test/connect/r/json_udf.result +++ b/storage/connect/mysql-test/connect/r/json_udf.result @@ -187,11 +187,11 @@ DATEPUB int(4) ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT Json_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2; Json_Make_Array(AUTHOR, TITLE, DATEPUB) -["Jean-Christophe Bernadac","Construire une application XML",1999] +[" Jean-Christophe Bernadac, François Knab","Construire une application XML",1999] ["William J. Pardi","XML en Action",1999] SELECT Json_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2; Json_Make_Object(AUTHOR, TITLE, DATEPUB) -{"AUTHOR":"Jean-Christophe Bernadac","TITLE":"Construire une application XML","DATEPUB":1999} +{"AUTHOR":" Jean-Christophe Bernadac, François Knab","TITLE":"Construire une application XML","DATEPUB":1999} {"AUTHOR":"William J. 
Pardi","TITLE":"XML en Action","DATEPUB":1999} SELECT Json_Array_Grp(TITLE, DATEPUB) FROM t2; ERROR HY000: Can't initialize function 'json_array_grp'; This function can only accept 1 argument @@ -610,7 +610,7 @@ JsonGet_String(Json_File('test/fx.json'), '1.*') {"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]} SELECT JsonGet_String(Json_File('test/fx.json'), '1'); JsonGet_String(Json_File('test/fx.json'), '1') -6 car roadster 56000 6 9 +6 car roadster 56000 (6, 9) SELECT JsonGet_Int(Json_File('test/fx.json'), '1.mileage') AS Mileage; Mileage 56000 diff --git a/storage/connect/mysql-test/connect/r/json_udf_bin.result b/storage/connect/mysql-test/connect/r/json_udf_bin.result index d0819619c33..c20cf7ce632 100644 --- a/storage/connect/mysql-test/connect/r/json_udf_bin.result +++ b/storage/connect/mysql-test/connect/r/json_udf_bin.result @@ -87,7 +87,7 @@ Json_Get_Item(Jbin_File('gloss.json'),'$.glossary.GlossDiv') {"title":"S","GlossList":{"GlossEntry":{"ID":"SGML","SortAs":"SGML","GlossTerm":"Standard Generalized Markup Language","Acronym":"SGML","Abbrev":"ISO 8879:1986","GlossDef":{"para":"A meta-markup language, used to create markup languages such as DocBook.","GlossSeeAlso":["GML","XML"]},"GlossSee":"markup"}}} SELECT JsonGet_String(Json_File('gloss.json'),'$.glossary.GlossDiv.GlossList.GlossEntry.GlossDef.GlossSeeAlso') lang; lang -GML +GML, XML SELECT Json_Get_Item(Jbin_File('gloss.json'),'$.glossary.GlossDiv.GlossList.GlossEntry.GlossDef.GlossSeeAlso') "See also"; See also ["GML","XML"] diff --git a/storage/connect/mysql-test/connect/r/mongo_c.result b/storage/connect/mysql-test/connect/r/mongo_c.result index 132bb34ce64..8b86ce32943 100644 --- a/storage/connect/mysql-test/connect/r/mongo_c.result +++ b/storage/connect/mysql-test/connect/r/mongo_c.result @@ -1,8 +1,9 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +16,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8 ; +OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 ; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath _id 1 CHAR 24 24 0 0 @@ -58,7 +59,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants -OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -72,7 +73,7 @@ t1 CREATE TABLE `t1` ( `grades_0` varchar(512) DEFAULT NULL `FIELD_FORMAT`='grades.0', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id 58ada47de5a51ddfcd5ed51c 1007 Morris Park Ave 10462 Bronx Bakery 
{"date":{"$date":1393804800000},"grade":"A","score":2} Morris Park Bake Shop 30075445 @@ -107,12 +108,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=C,Version=0' ; @@ -301,8 +302,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities' @@ -340,11 +341,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=C,Version=0' ; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/mongo_java_2.result b/storage/connect/mysql-test/connect/r/mongo_java_2.result index bc186d7137e..cccda2760d6 100644 --- a/storage/connect/mysql-test/connect/r/mongo_java_2.result +++ b/storage/connect/mysql-test/connect/r/mongo_java_2.result @@ -1,8 +1,9 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +16,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8 ; +OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 ; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath _id 1 CHAR 24 24 0 0 @@ -58,7 +59,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants -OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -72,7 +73,7 @@ t1 CREATE TABLE `t1` ( `grades_0` char(99) DEFAULT NULL `FIELD_FORMAT`='grades.0', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' 
`OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id 58ada47de5a51ddfcd5ed51c 1007 [ -73.856077 , 40.848447] Morris Park Ave 10462 Bronx Bakery { "date" : { "$date" : "2014-03-03T00:00:00.000Z"} , "grade" : "A" , "score" : 2} Morris Park Bake Shop 30075445 @@ -107,12 +108,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=2' ; @@ -301,8 +302,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities' @@ -340,11 +341,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=2' ; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/mongo_java_3.result b/storage/connect/mysql-test/connect/r/mongo_java_3.result index 30c696fc9eb..ae39148a156 100644 --- a/storage/connect/mysql-test/connect/r/mongo_java_3.result +++ b/storage/connect/mysql-test/connect/r/mongo_java_3.result @@ -1,8 +1,9 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +16,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8 ; +OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 ; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath _id 1 CHAR 24 24 0 0 @@ -58,7 +59,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO 
TABNAME=restaurants -OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -72,7 +73,7 @@ t1 CREATE TABLE `t1` ( `grades_0` char(84) DEFAULT NULL `FIELD_FORMAT`='grades.0', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id 58ada47de5a51ddfcd5ed51c 1007 [-73.856077, 40.848447] Morris Park Ave 10462 Bronx Bakery { "date" : { "$date" : 1393804800000 }, "grade" : "A", "score" : 2 } Morris Park Bake Shop 30075445 @@ -107,12 +108,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=3' ; @@ -301,8 +302,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities' @@ -340,11 +341,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=3' ; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/odbc_oracle.result b/storage/connect/mysql-test/connect/r/odbc_oracle.result index 8dc7dc07bb1..acb7d9a74c9 100644 --- a/storage/connect/mysql-test/connect/r/odbc_oracle.result +++ b/storage/connect/mysql-test/connect/r/odbc_oracle.result @@ -10,7 +10,7 @@ SET NAMES utf8; # All tables in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -20,7 +20,7 
@@ NULL MTR V1 VIEW NULL DROP TABLE t1; # All tables in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='%.%'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -30,7 +30,7 @@ NULL MTR V1 VIEW NULL DROP TABLE t1; # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -38,7 +38,7 @@ NULL MTR T1 TABLE NULL DROP TABLE t1; # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -46,7 +46,7 @@ NULL MTR T1 TABLE NULL DROP TABLE t1; # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='MTR.T1'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -54,7 +54,7 @@ NULL MTR T1 TABLE NULL DROP TABLE t1; # All tables in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='MTR.%'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -68,7 +68,7 @@ DROP TABLE t1; # All columns in all schemas (limited with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks @@ -80,7 +80,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu DROP TABLE t1; # All columns in all schemas (limited with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.%'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks @@ -91,7 +91,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu MTR V1 B 6 NUMBER 38 40 NULL NULL 1 DROP TABLE t1; # All tables "T1" in all schemas (limited with WHERE) -CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' CATFUNC=Columns TABNAME='%.T1'; +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC 
CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks MTR T1 A 3 DECIMAL 38 40 0 10 1 @@ -99,7 +99,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu DROP TABLE t1; # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='MTR.T1'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks @@ -108,7 +108,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu DROP TABLE t1; # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks @@ -121,14 +121,14 @@ DROP TABLE t1; # Table "T1" in the default schema ("MTR") CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='T1'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `A` decimal(40,0) DEFAULT NULL, `B` double DEFAULT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='T1' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='T1' SELECT * FROM t1 ORDER BY A; A B 10 1000000000 @@ -157,14 +157,14 @@ DROP VIEW v1; DROP TABLE t1; # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.T1'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `A` decimal(40,0) DEFAULT NULL, `B` double DEFAULT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T1' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T1' SELECT * FROM t1; A B 10 1000000000 @@ -173,14 +173,14 @@ A B DROP TABLE t1; # View "V1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.V1'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `A` decimal(40,0) DEFAULT NULL, `B` double DEFAULT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.V1' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.V1' SELECT * FROM t1; A B 10 1000000000 @@ -209,13 +209,13 
@@ DROP VIEW v1; DROP TABLE t1; # Table "T2" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.T2'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `A` varchar(64) DEFAULT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T2' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T2' SELECT * FROM t1; A test diff --git a/storage/connect/mysql-test/connect/r/rest.result b/storage/connect/mysql-test/connect/r/rest.result new file mode 100644 index 00000000000..3c4ec80ce71 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/rest.result @@ -0,0 +1,19 @@ +# +# Testing REST query +# +CREATE TABLE t1 +ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json' +HTTP='http://jsonplaceholder.typicode.com/users'; +SELECT * FROM t1; +id name username email address_street address_suite address_city address_zipcode address_geo_lat address_geo_lng phone website company_name company_catchPhrase company_bs +1 Leanne Graham Bret Sincere@april.biz Kulas Light Apt. 556 Gwenborough 92998-3874 -37.3159 81.1496 1-770-736-8031 x56442 hildegard.org Romaguera-Crona Multi-layered client-server neural-net harness real-time e-markets +2 Ervin Howell Antonette Shanna@melissa.tv Victor Plains Suite 879 Wisokyburgh 90566-7771 -43.9509 -34.4618 010-692-6593 x09125 anastasia.net Deckow-Crist Proactive didactic contingency synergize scalable supply-chains +3 Clementine Bauch Samantha Nathan@yesenia.net Douglas Extension Suite 847 McKenziehaven 59590-4157 -68.6102 -47.0653 1-463-123-4447 ramiro.info Romaguera-Jacobson Face to face bifurcated interface e-enable strategic applications +4 Patricia Lebsack Karianne Julianne.OConner@kory.org Hoeger Mall Apt. 692 South Elvis 53919-4257 29.4572 -164.2990 493-170-9623 x156 kale.biz Robel-Corkery Multi-tiered zero tolerance productivity transition cutting-edge web services +5 Chelsey Dietrich Kamren Lucio_Hettinger@annie.ca Skiles Walks Suite 351 Roscoeview 33263 -31.8129 62.5342 (254)954-1289 demarco.info Keebler LLC User-centric fault-tolerant solution revolutionize end-to-end systems +6 Mrs. Dennis Schulist Leopoldo_Corkery Karley_Dach@jasper.info Norberto Crossing Apt. 
950 South Christy 23505-1337 -71.4197 71.7478 1-477-935-8478 x6430 ola.org Considine-Lockman Synchronised bottom-line interface e-enable innovative applications +7 Kurtis Weissnat Elwyn.Skiles Telly.Hoeger@billy.biz Rex Trail Suite 280 Howemouth 58804-1099 24.8918 21.8984 210.067.6132 elvis.io Johns Group Configurable multimedia task-force generate enterprise e-tailers +8 Nicholas Runolfsdottir V Maxime_Nienow Sherwood@rosamond.me Ellsworth Summit Suite 729 Aliyaview 45169 -14.3990 -120.7677 586.493.6943 x140 jacynthe.com Abernathy Group Implemented secondary concept e-enable extensible e-tailers +9 Glenna Reichert Delphine Chaim_McDermott@dana.io Dayna Park Suite 449 Bartholomebury 76495-3109 24.6463 -168.8889 (775)976-6794 x41206 conrad.com Yost and Sons Switchable contextually-based project aggregate real-time technologies +10 Clementina DuBuque Moriah.Stanton Rey.Padberg@karina.biz Kattie Turnpike Suite 198 Lebsackbury 31428-2261 -38.2386 57.2232 024-648-3804 ambrose.net Hoeger LLC Centralized empowering task-force target end-to-end models +DROP TABLE t1; diff --git a/storage/connect/mysql-test/connect/r/xml.result b/storage/connect/mysql-test/connect/r/xml.result index 6a0c9db27b3..575c903bbbc 100644 --- a/storage/connect/mysql-test/connect/r/xml.result +++ b/storage/connect/mysql-test/connect/r/xml.result @@ -374,8 +374,7 @@ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3); Warnings: Level Warning Code 1105 -Message Com error: Unable to save character to 'iso-8859-1' encoding.
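# Note: the new rest.result above exercises CONNECT's REST support: a JSON
# table whose backing file is first fetched from the URL given in the HTTP
# option. A minimal sketch of the same pattern, assuming a CONNECT build
# with REST support and network access to the public jsonplaceholder
# service used by the test (users_rest is a hypothetical table name; the
# columns and options are taken from rest.result):
CREATE TABLE users_rest
ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json'
HTTP='http://jsonplaceholder.typicode.com/users';
SELECT name, email FROM users_rest LIMIT 3;
DROP TABLE users_rest;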
- +Message warning about characters outside of iso-8859-1 INSERT INTO t1 VALUES ('&<>"\''); SELECT node, hex(node) FROM t1; node &<>"' diff --git a/storage/connect/mysql-test/connect/r/xml2.result b/storage/connect/mysql-test/connect/r/xml2.result index f7bbc17c8a0..891c6e6f8dd 100644 --- a/storage/connect/mysql-test/connect/r/xml2.result +++ b/storage/connect/mysql-test/connect/r/xml2.result @@ -87,9 +87,9 @@ DROP TABLE t1; # Testing mixed tag and attribute values # CREATE TABLE t1 ( -ISBN CHAR(15) FIELD_FORMAT='@', -LANG CHAR(2) FIELD_FORMAT='@', -SUBJECT CHAR(32) FIELD_FORMAT='@', +ISBN CHAR(15) XPATH='@', +LANG CHAR(2) XPATH='@', +SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -120,9 +120,9 @@ DROP TABLE t1; # Testing INSERT on mixed tag and attribute values # CREATE TABLE t1 ( -ISBN CHAR(15) FIELD_FORMAT='@', -LANG CHAR(2) FIELD_FORMAT='@', -SUBJECT CHAR(32) FIELD_FORMAT='@', +ISBN CHAR(15) XPATH='@', +LANG CHAR(2) XPATH='@', +SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -207,18 +207,18 @@ DROP TABLE t1; # Testing XPath # CREATE TABLE t1 ( -isbn CHAR(15) FIELD_FORMAT='@ISBN', -language CHAR(2) FIELD_FORMAT='@LANG', -subject CHAR(32) FIELD_FORMAT='@SUBJECT', -authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME', -authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME', -title CHAR(32) FIELD_FORMAT='TITLE', -translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX', -tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME', -publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME', -location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE', -year INT(4) FIELD_FORMAT='DATEPUB' +isbn CHAR(15) XPATH='@ISBN', +language CHAR(2) XPATH='@LANG', +subject CHAR(32) XPATH='@SUBJECT', +authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME', +authorln CHAR(20) XPATH='AUTHOR/LASTNAME', +title CHAR(32) XPATH='TITLE', +translated CHAR(32) XPATH='TRANSLATOR/@PREFIX', +tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME', +tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME', +publisher CHAR(20) XPATH='PUBLISHER/NAME', +location CHAR(20) XPATH='PUBLISHER/PLACE', +year INT(4) XPATH='DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2'; SELECT * FROM t1; @@ -260,7 +260,7 @@ DROP TABLE t1; # CREATE TABLE t1 ( -isbn CHAR(15) FIELD_FORMAT='@isbn' +isbn CHAR(15) XPATH='@isbn' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2'; SELECT * FROM t1; diff --git a/storage/connect/mysql-test/connect/r/xml2_html.result b/storage/connect/mysql-test/connect/r/xml2_html.result index 143f46529f6..499108b724d 100644 --- a/storage/connect/mysql-test/connect/r/xml2_html.result +++ b/storage/connect/mysql-test/connect/r/xml2_html.result @@ -5,9 +5,9 @@ SET NAMES utf8; # Testing HTML like XML file # CREATE TABLE beers ( -`Name` CHAR(16) FIELD_FORMAT='brandName', -`Origin` CHAR(16) FIELD_FORMAT='origin', -`Description` CHAR(32) FIELD_FORMAT='details') +`Name` CHAR(16) XPATH='brandName', +`Origin` CHAR(16) XPATH='origin', +`Description` CHAR(32) XPATH='details') ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml' TABNAME='table' OPTION_LIST='xmlsup=libxml2,rownode=tr,colnode=td'; SELECT * FROM beers; diff --git a/storage/connect/mysql-test/connect/r/xml2_mult.result b/storage/connect/mysql-test/connect/r/xml2_mult.result index 07c86d961e1..0146baa89c0 100644 --- 
a/storage/connect/mysql-test/connect/r/xml2_mult.result +++ b/storage/connect/mysql-test/connect/r/xml2_mult.result @@ -5,9 +5,9 @@ SET NAMES utf8; # Testing expanded values # CREATE TABLE `bookstore` ( -`category` CHAR(16) NOT NULL FIELD_FORMAT='@', +`category` CHAR(16) NOT NULL XPATH='@', `title` VARCHAR(50) NOT NULL, -`lang` char(2) NOT NULL FIELD_FORMAT='title/@', +`lang` char(2) NOT NULL XPATH='title/@', `author` VARCHAR(24) NOT NULL, `year` INT(4) NOT NULL, `price` DOUBLE(8,2) NOT NULL) diff --git a/storage/connect/mysql-test/connect/r/xml2_zip.result b/storage/connect/mysql-test/connect/r/xml2_zip.result index f176149c53f..e743af32418 100644 --- a/storage/connect/mysql-test/connect/r/xml2_zip.result +++ b/storage/connect/mysql-test/connect/r/xml2_zip.result @@ -4,20 +4,20 @@ Warning 1105 No file name. Table will use t1.xml # Testing zipped XML tables # CREATE TABLE t1 ( -ISBN CHAR(13) NOT NULL FIELD_FORMAT='@', -LANG CHAR(2) NOT NULL FIELD_FORMAT='@', -SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@', -AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME', -AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME', -TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX', -TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME', +ISBN CHAR(13) NOT NULL XPATH='@', +LANG CHAR(2) NOT NULL XPATH='@', +SUBJECT CHAR(12) NOT NULL XPATH='@', +AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME', +AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME', +TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX', +TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME', +TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME', TITLE CHAR(30) NOT NULL, -PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME', -PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE', +PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', +PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; +OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; SELECT * FROM t1; ISBN 9782212090819 LANG fr @@ -69,7 +69,7 @@ PUBLISHER_PLACE Paris DATEPUB 2003 CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='xmlsup=libxml2'; +OPTION_LIST='depth=0,xmlsup=libxml2'; SELECT * FROM t2; ISBN 9782212090819 LANG fr diff --git a/storage/connect/mysql-test/connect/r/xml_html.result b/storage/connect/mysql-test/connect/r/xml_html.result index 4b984a49901..308c67ffc28 100644 --- a/storage/connect/mysql-test/connect/r/xml_html.result +++ b/storage/connect/mysql-test/connect/r/xml_html.result @@ -3,9 +3,9 @@ SET NAMES utf8; # Testing HTML like XML file # CREATE TABLE beers ( -`Name` CHAR(16) FIELD_FORMAT='brandName', -`Origin` CHAR(16) FIELD_FORMAT='origin', -`Description` CHAR(32) FIELD_FORMAT='details') +`Name` CHAR(16) XPATH='brandName', +`Origin` CHAR(16) XPATH='origin', +`Description` CHAR(32) XPATH='details') ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml' TABNAME='table' OPTION_LIST='xmlsup=domdoc,rownode=tr,colnode=td'; SELECT * FROM beers; diff --git a/storage/connect/mysql-test/connect/r/xml_mult.result 
b/storage/connect/mysql-test/connect/r/xml_mult.result index c786a80819c..9cdc36dea6b 100644 --- a/storage/connect/mysql-test/connect/r/xml_mult.result +++ b/storage/connect/mysql-test/connect/r/xml_mult.result @@ -3,9 +3,9 @@ SET NAMES utf8; # Testing expanded values # CREATE TABLE `bookstore` ( -`category` CHAR(16) NOT NULL FIELD_FORMAT='@', +`category` CHAR(16) NOT NULL XPATH='@', `title` VARCHAR(50) NOT NULL, -`lang` char(2) NOT NULL FIELD_FORMAT='title/@', +`lang` char(2) NOT NULL XPATH='title/@', `author` VARCHAR(24) NOT NULL, `year` INT(4) NOT NULL, `price` DOUBLE(8,2) NOT NULL) diff --git a/storage/connect/mysql-test/connect/r/xml_zip.result b/storage/connect/mysql-test/connect/r/xml_zip.result index f7790e4cfff..5f17249b390 100644 --- a/storage/connect/mysql-test/connect/r/xml_zip.result +++ b/storage/connect/mysql-test/connect/r/xml_zip.result @@ -2,20 +2,20 @@ # Testing zipped XML tables # CREATE TABLE t1 ( -ISBN CHAR(13) NOT NULL FIELD_FORMAT='@', -LANG CHAR(2) NOT NULL FIELD_FORMAT='@', -SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@', -AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME', -AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME', -TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX', -TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME', +ISBN CHAR(13) NOT NULL XPATH='@', +LANG CHAR(2) NOT NULL XPATH='@', +SUBJECT CHAR(12) NOT NULL XPATH='@', +AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME', +AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME', +TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX', +TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME', +TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME', TITLE CHAR(30) NOT NULL, -PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME', -PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE', +PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', +PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; +OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; SELECT * FROM t1; ISBN 9782212090819 LANG fr @@ -67,7 +67,7 @@ PUBLISHER_PLACE Paris DATEPUB 2003 CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='xmlsup=domdoc'; +OPTION_LIST='depth=0,xmlsup=domdoc'; SELECT * FROM t2; ISBN 9782212090819 LANG fr diff --git a/storage/connect/mysql-test/connect/r/zip.result b/storage/connect/mysql-test/connect/r/zip.result index c81546a4689..c696252ca43 100644 --- a/storage/connect/mysql-test/connect/r/zip.result +++ b/storage/connect/mysql-test/connect/r/zip.result @@ -171,32 +171,32 @@ DROP TABLE t1,t2,t3,t4; # CREATE TABLE t1 ( _id INT(2) NOT NULL, -name_first CHAR(9) NOT NULL FIELD_FORMAT='$.name.first', -name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka', -name_last CHAR(10) NOT NULL FIELD_FORMAT='$.name.last', +name_first CHAR(9) NOT NULL JPATH='$.name.first', +name_aka CHAR(4) DEFAULT NULL JPATH='$.name.aka', +name_last CHAR(10) NOT NULL JPATH='$.name.last', title CHAR(12) DEFAULT NULL, birth CHAR(20) DEFAULT NULL, death CHAR(20) DEFAULT NULL, -contribs CHAR(7) NOT NULL FIELD_FORMAT='$.contribs', 
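# Note: this hunk renames the per-column FIELD_FORMAT option to JPATH for
# JSON tables (XML tables use XPATH); the '$.name.first' style paths are
# unchanged. A minimal sketch of the new spelling against the same zipped
# document, assuming the bios.zip archive prepared by this test (bios_demo
# is a hypothetical table name; the column definitions are taken from t1):
CREATE TABLE bios_demo (
_id INT(2) NOT NULL,
name_first CHAR(9) NOT NULL JPATH='$.name.first',
contribs CHAR(50) NOT NULL JPATH='$.contribs'
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=YES;
SELECT * FROM bios_demo LIMIT 3;
DROP TABLE bios_demo;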
-awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards.award', -awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards.year', -awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards.by' +contribs CHAR(50) NOT NULL JPATH='$.contribs', +awards_award CHAR(42) DEFAULT NULL JPATH='$.awards.award', +awards_year CHAR(4) DEFAULT NULL JPATH='$.awards.year', +awards_by CHAR(38) DEFAULT NULL JPATH='$.awards.by' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES; SELECT * FROM t1; _id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by -1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society -2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp Turing Award 1971 ACM -3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC Computer Sciences Man of the Year 1969 Data Processing Management Association -4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association -5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association +1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran, ALGOL, Backus-Naur Form, FP W.W. McDowell Award 1967 IEEE Computer Society +2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp, Artificial Intelligence, ALGOL Turing Award 1971 ACM +3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC, compiler, FLOW-MATIC, COBOL Computer Sciences Man of the Year 1969 Data Processing Management Association +4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP, Simula Rosing Prize 1999 Norwegian Data Association +5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP, Simula Rosing Prize 1999 Norwegian Data Association 6 Guido NULL van Rossum NULL 1956-01-31T05:00:00Z NULL Python Award for the Advancement of Free Software 2001 Free Software Foundation -7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX Turing Award 1983 ACM +7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX, C Turing Award 1983 ACM 8 Yukihiro Matz Matsumoto NULL 1965-04-14T04:00:00Z NULL Ruby Award for the Advancement of Free Software 2011 Free Software Foundation 9 James NULL Gosling NULL 1955-05-19T04:00:00Z NULL Java The Economist Innovation Award 2002 The Economist 10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1 -OPTION_LIST='LEVEL=5'; +OPTION_LIST='DEPTH=5'; SELECT * FROM t2; _id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by 1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. 
McDowell Award 1967 IEEE Computer Society @@ -211,16 +211,16 @@ _id name_first name_aka name_last title birth death contribs awards_award awards 10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL CREATE TABLE t3 ( _id INT(2) NOT NULL, -firstname CHAR(9) NOT NULL FIELD_FORMAT='$.name.first', -aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka', -lastname CHAR(10) NOT NULL FIELD_FORMAT='$.name.last', +firstname CHAR(9) NOT NULL JPATH='$.name.first', +aka CHAR(4) DEFAULT NULL JPATH='$.name.aka', +lastname CHAR(10) NOT NULL JPATH='$.name.last', title CHAR(12) DEFAULT NULL, birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'", death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'", -contribs CHAR(64) NOT NULL FIELD_FORMAT='$.contribs.[", "]', -award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards[*].award', -year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards[*].year', -`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards[*].by' +contribs CHAR(64) NOT NULL JPATH='$.contribs.[", "]', +award CHAR(42) DEFAULT NULL JPATH='$.awards[*].award', +year CHAR(4) DEFAULT NULL JPATH='$.awards[*].year', +`by` CHAR(38) DEFAULT NULL JPATH='$.awards[*].by' ) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES; SELECT * FROM t3 WHERE _id = 1; _id firstname aka lastname title birth death contribs award year by diff --git a/storage/connect/mysql-test/connect/t/alter_xml.test b/storage/connect/mysql-test/connect/t/alter_xml.test index 8b2164d5548..4c2e1670f4c 100644 --- a/storage/connect/mysql-test/connect/t/alter_xml.test +++ b/storage/connect/mysql-test/connect/t/alter_xml.test @@ -21,7 +21,7 @@ SELECT * FROM t2; --echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option. --echo # Testing field option modification -ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0; +ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0; SELECT * FROM t1; SHOW CREATE TABLE t1; SELECT * FROM t2; diff --git a/storage/connect/mysql-test/connect/t/alter_xml2.test b/storage/connect/mysql-test/connect/t/alter_xml2.test index d67c80c4e9f..ec4065baa47 100644 --- a/storage/connect/mysql-test/connect/t/alter_xml2.test +++ b/storage/connect/mysql-test/connect/t/alter_xml2.test @@ -21,7 +21,7 @@ SELECT * FROM t2; --echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option. 
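# Note: for XML tables the per-column FIELD_FORMAT option is now spelled
# XPATH, as the ALTER TABLE just below shows. A minimal sketch of the new
# spelling, assuming the xsample.xml file used by the xml tests above
# (books is a hypothetical table name; the paths are taken from xml2.result):
CREATE TABLE books (
isbn CHAR(15) XPATH='@ISBN',
title CHAR(32) XPATH='TITLE',
publisher CHAR(20) XPATH='PUBLISHER/NAME'
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO'
OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT isbn, title FROM books;
DROP TABLE books;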
--echo # Testing field option modification -ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0; +ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0; SELECT * FROM t1; SHOW CREATE TABLE t1; SELECT * FROM t2; diff --git a/storage/connect/mysql-test/connect/t/bson.test b/storage/connect/mysql-test/connect/t/bson.test new file mode 100644 index 00000000000..ab38cab73fc --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson.test @@ -0,0 +1,294 @@ +--source include/not_embedded.inc +--source include/have_partition.inc + +let $MYSQLD_DATADIR= `select @@datadir`; + +--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json +--copy_file $MTR_SUITE_DIR/std_data/bib0.json $MYSQLD_DATADIR/test/bib0.json +--copy_file $MTR_SUITE_DIR/std_data/expense.json $MYSQLD_DATADIR/test/expense.json +--copy_file $MTR_SUITE_DIR/std_data/mulexp3.json $MYSQLD_DATADIR/test/mulexp3.json +--copy_file $MTR_SUITE_DIR/std_data/mulexp4.json $MYSQLD_DATADIR/test/mulexp4.json +--copy_file $MTR_SUITE_DIR/std_data/mulexp5.json $MYSQLD_DATADIR/test/mulexp5.json + +--echo # +--echo # Testing doc samples +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + LANG CHAR(2), + SUBJECT CHAR(32), + AUTHOR CHAR(64), + TITLE CHAR(32), + TRANSLATION CHAR(32), + TRANSLATOR CHAR(80), + PUBLISHER CHAR(32), + DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + + +--echo # +--echo # Testing Jpath. Get the number of authors +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + Authors INT(2) JPATH='$.AUTHOR[#]', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Concatenates the authors +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Testing expanding authors +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab'; +SELECT * FROM t1 WHERE ISBN = '9782212090819'; + +--echo # +--echo # To add an author a new table must be created +--echo # +CREATE TABLE t2 ( +FIRSTNAME CHAR(32), +LASTNAME CHAR(32)) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' 
OPTION_LIST='Object=$[1].AUTHOR'; +SELECT * FROM t2; +INSERT INTO t2 VALUES('Charles','Dickens'); +SELECT * FROM t1; +DROP TABLE t1; +DROP TABLE t2; + +--echo # +--echo # Check the biblio file has the good format +--echo # +CREATE TABLE t1 +( + line char(255) +) +ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Testing a pretty=0 file +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15) NOT NULL, + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', + TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', + TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB', + INDEX IX(ISBN) +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; +SHOW INDEX FROM t1; +SELECT * FROM t1; +DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819'; +--error ER_GET_ERRMSG +UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819'; +DROP TABLE t1; + +--echo # +--echo # A file with 2 arrays +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Now it can be fully expanded +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +#--error ER_GET_ERRMSG +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # A table showing many calculated results +--echo # +CREATE TABLE t1 ( +WHO CHAR(12) NOT NULL, +WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Expand expense in 3 one week tables +--echo # +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t2; + +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t3; + +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t4; + +--echo # +--echo # 
The expanded table is made as a TBL table +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32), +AMOUNT DOUBLE(8,2)) +ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4'; +SELECT * FROM t1; +DROP TABLE t1, t2, t3, t4; + +--echo # +--echo # Three partial JSON tables +--echo # +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json'; +SELECT * FROM t2; + +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json'; +SELECT * FROM t3; + +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json'; +SELECT * FROM t4; + +--echo # +--echo # The complete table can be a multiple JSON table +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1; +SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; +DROP TABLE t1; + +--echo # +--echo # Or also a partition JSON table +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json'; +ALTER TABLE t1 +PARTITION BY LIST COLUMNS(WEEK) ( +PARTITION `3` VALUES IN(3), +PARTITION `4` VALUES IN(4), +PARTITION `5` VALUES IN(5)); +SHOW WARNINGS; +SELECT * FROM t1; +SELECT * FROM t1 WHERE WEEK = 4; +DROP TABLE t1, t2, t3, t4; + +# +# Clean up +# +--remove_file $MYSQLD_DATADIR/test/biblio.json +--remove_file $MYSQLD_DATADIR/test/bib0.dnx +--remove_file $MYSQLD_DATADIR/test/bib0.json +--remove_file $MYSQLD_DATADIR/test/expense.json +--remove_file $MYSQLD_DATADIR/test/mulexp3.json +--remove_file $MYSQLD_DATADIR/test/mulexp4.json +--remove_file $MYSQLD_DATADIR/test/mulexp5.json diff --git a/storage/connect/mysql-test/connect/t/bson_java_2.test b/storage/connect/mysql-test/connect/t/bson_java_2.test new file mode 100644 index 00000000000..2188d9c2c91 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_java_2.test @@ -0,0 +1,14 @@ +-- source jdbconn.inc +-- source mongo.inc + +--disable_query_log +eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo2.jar'; +set connect_json_all_path=0; +--enable_query_log +let $DRV= Java; +let $VERS= 2; +let $TYPE= BSON; +let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=4096; + +-- source mongo_test.inc +-- source jdbconn_cleanup.inc diff --git a/storage/connect/mysql-test/connect/t/bson_java_3.test b/storage/connect/mysql-test/connect/t/bson_java_3.test new file mode 100644 index 00000000000..e7dd90b3563 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_java_3.test @@ -0,0 +1,14 @@ +-- source jdbconn.inc +-- source mongo.inc + +--disable_query_log +eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo3.jar'; +set connect_json_all_path=0; +--enable_query_log +let $DRV= Java; +let $VERS= 3; +let $TYPE= BSON; +let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=4096; + +-- source mongo_test.inc +-- source jdbconn_cleanup.inc diff --git a/storage/connect/mysql-test/connect/t/bson_mongo_c.test b/storage/connect/mysql-test/connect/t/bson_mongo_c.test new file 
mode 100644 index 00000000000..938d77c7c95 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_mongo_c.test @@ -0,0 +1,10 @@ +-- source mongo.inc + +let $DRV= C; +let $VERS= 0; +let $PROJ= {"projection":; +let $ENDP= }; +let $TYPE= BSON; +let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=1024; + +-- source mongo_test.inc diff --git a/storage/connect/mysql-test/connect/t/bson_udf.inc b/storage/connect/mysql-test/connect/t/bson_udf.inc new file mode 100644 index 00000000000..c4722722ef7 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_udf.inc @@ -0,0 +1,72 @@ +--disable_query_log +# +# Check if server has support for loading plugins +# +if (`SELECT @@have_dynamic_loading != 'YES'`) { + --skip UDF requires dynamic loading +} +if (!$HA_CONNECT_SO) { + --skip Needs a dynamically built ha_connect.so +} + +--eval CREATE FUNCTION bson_test RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonvalue RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_make_array RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_array_add_values RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_array_add RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_array_delete RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_make_object RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_nonull RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_key RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_add RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_delete RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_list RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_values RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonset_def_prec RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_def_prec RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonset_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE AGGREGATE FUNCTION bson_array_grp RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE AGGREGATE FUNCTION bson_object_grp RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonlocate RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_locate_all RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_contains RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsoncontains_path RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_item_merge RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_get_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_delete_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_string RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_int RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_real RETURNS REAL SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_set_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_insert_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_update_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_file RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_serialize RETURNS STRING SONAME 
'$HA_CONNECT_SO'; +--eval CREATE FUNCTION bfile_make RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bfile_convert RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bfile_bjson RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_make_array RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_array_add RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_array_add_values RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_array_delete RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE AGGREGATE FUNCTION bbin_array_grp RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE AGGREGATE FUNCTION bbin_object_grp RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_make_object RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_nonull RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_key RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_add RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_delete RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_list RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_values RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_get_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_item_merge RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_set_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_insert_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_update_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_delete_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_locate_all RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_file RETURNS STRING SONAME '$HA_CONNECT_SO'; + +--enable_query_log + diff --git a/storage/connect/mysql-test/connect/t/bson_udf.test b/storage/connect/mysql-test/connect/t/bson_udf.test new file mode 100644 index 00000000000..0da2de38864 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_udf.test @@ -0,0 +1,282 @@ +--source bson_udf.inc + +let $MYSQLD_DATADIR= `select @@datadir`; + +--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json +--copy_file $MTR_SUITE_DIR/std_data/employee.dat $MYSQLD_DATADIR/test/employee.dat + +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=VIR BLOCK_SIZE=5; + +--echo # +--echo # Test UDF's with constant arguments +--echo # +--error ER_CANT_INITIALIZE_UDF +SELECT BsonValue(56, 3.1416, 'foo', NULL); +SELECT BsonValue(3.1416); +SELECT BsonValue(-80); +SELECT BsonValue('foo'); +SELECT BsonValue(9223372036854775807); +SELECT BsonValue(NULL); +SELECT BsonValue(TRUE); +SELECT BsonValue(FALSE); +SELECT BsonValue(); +SELECT BsonValue('[11, 22, 33]' json_) FROM t1; +# +SELECT Bson_Make_Array(); +SELECT Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL); +SELECT Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE); +# +--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL)) Array; +SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Array; +#--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Add(BsonValue('one value'), 'One more'); +#--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Add('one value', 'One more'); +SELECT Bson_Array_Add('one value' json_, 'One more'); +#--error 
ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Add(5 json_, 'One more'); +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0); +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 2) Array; +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9); +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1); +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1); +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]'); +# +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin', NULL), 'One more', 'Two more') Array; +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), 'One more', 'Two more') Array FROM t1; +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), n) Array FROM t1; +SELECT Bson_Array_Add_Values(Bson_Make_Array(n, 3.1416, 'machin'), n) Array FROM t1; +SELECT Bson_Array_Add_Values('[56]', 3.1416, 'machin') Array; +# +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0); +SELECT Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2); +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2'); +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2); /* WARNING VOID */ +# +SELECT Bson_Make_Object(56, 3.1416, 'foo', NULL); +SELECT Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty); +SELECT Bson_Make_Object(); +SELECT Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL); +SELECT Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL); +SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL); +--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty'); +# +SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color); +SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price); +SELECT Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1); +# +SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc'); +SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose'); +# +SELECT Bson_Object_List(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty)) "Key List"; +SELECT Bson_Object_List('{"qty":56, "price":3.1416, "truc":"machin", "garanty":null}') "Key List"; +SELECT Bson_Object_Values('{"One":1,"Two":2,"Three":3}') "Value List"; + +--echo # +--echo # Test UDF's with column arguments +--echo # +SELECT Bsonset_Def_Prec(2); +CREATE TABLE t2 +( + ISBN CHAR(15), + LANG CHAR(2), + SUBJECT CHAR(32), + AUTHOR CHAR(64), + TITLE CHAR(32), + TRANSLATION CHAR(32), + TRANSLATOR CHAR(80), + PUBLISHER CHAR(32), + DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; + +SELECT Bson_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2; +SELECT Bson_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2; +--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Grp(TITLE, DATEPUB) FROM t2; +SELECT Bson_Array_Grp(TITLE) FROM t2; + +CREATE TABLE t3 ( + SERIALNO CHAR(5) NOT NULL, + NAME VARCHAR(12) NOT NULL FLAG=6, + SEX SMALLINT(1) NOT NULL, + TITLE VARCHAR(15) NOT NULL FLAG=20, + MANAGER CHAR(5) DEFAULT NULL, + DEPARTMENT CHAr(4) NOT NULL FLAG=41, + SECRETARY CHAR(5) DEFAULT NULL FLAG=46, + SALARY DOUBLE(8,2) NOT NULL FLAG=52 +) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 
FILE_NAME='employee.dat' ENDING=1; + +SELECT Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) FROM t3 WHERE NAME = 'MERCHANT'; +SELECT DEPARTMENT, Bson_Array_Grp(NAME) FROM t3 GROUP BY DEPARTMENT; +#SET connect_json_grp_size=30; Deprecated +SELECT BsonSet_Grp_Size(30); +SELECT Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) from t3 GROUP BY title; +SELECT Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) FROM t3 GROUP BY DEPARTMENT; +SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) FROM t3 GROUP BY DEPARTMENT; +SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT; +SELECT Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT, TITLE; +--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Object_Grp(SALARY) FROM t3; +SELECT Bson_Object_Grp(NAME, SALARY) FROM t3; +SELECT Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") FROM t3 GROUP BY DEPARTMENT; +SELECT Bson_Array_Grp(NAME) FROM t3; +# +SELECT Bson_Object_Key(name, title) FROM t3 WHERE DEPARTMENT = 318; +SELECT Bson_Object_Grp(name, title) FROM t3 WHERE DEPARTMENT = 318; + +--echo # +--echo # Test value getting UDF's +--echo # +SELECT BsonGet_String(Bson_Array_Grp(name),'[#]') FROM t3; +SELECT BsonGet_String(Bson_Array_Grp(name),'[","]') FROM t3; +SELECT BsonGet_String(Bson_Array_Grp(name),'[>]') FROM t3; +SET @j1 = '[45,28,36,45,89]'; +SELECT BsonGet_String(@j1,'1'); +SELECT BsonGet_String(@j1 json_,'3'); +SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3'); +SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",BsonGet_String(Bson_Make_Array(45,28,36,45,89),'[+]') "sum"; +SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0'); +SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*'); +SELECT BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc'); +SET @j2 = '{"qty":56,"price":3.141600,"truc":"machin","garanty":null}'; +SELECT BsonGet_String(@j2 json_,'truc'); +SELECT BsonGet_String(@j2,'truc'); +SELECT BsonGet_String(@j2,'chose'); +SELECT BsonGet_String(NULL json_, NULL); /* NULL WARNING */ +SELECT department, BsonGet_String(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department; +# +SELECT BsonGet_Int(@j1, '4'); +SELECT BsonGet_Int(@j1, '[#]'); +SELECT BsonGet_Int(@j1, '[+]'); +SELECT BsonGet_Int(@j1 json_, '3'); +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3'); +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]'); +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]'); +SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0'); +SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1'); +SELECT BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty'); +SELECT BsonGet_Int(@j2 json_, 'price'); +SELECT BsonGet_Int(@j2, 'qty'); +SELECT BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose'); +SELECT BsonGet_Int(BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)), '1.*'), '[+]') sum; +SELECT department, BsonGet_Int(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"), 'salaries.[+]') Sumsal FROM t3 GROUP BY department; +# 
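The bracketed items in the BsonGet_* paths above are CONNECT aggregate operators, not array indexes: '[#]' yields the element count, '[+]' the sum, '[!]' the average (exercised by the BsonGet_Real block that follows), and '["sep"]' a concatenation joined by sep. A minimal sketch of the expected behaviour, assuming the UDFs are loaded as in bson_udf.inc:

SELECT BsonGet_Int('[45,28,36,45,89]', '[#]');      # count   -> 5
SELECT BsonGet_Int('[45,28,36,45,89]', '[+]');      # sum     -> 243
SELECT BsonGet_Real('[45,28,36,45,89]', '[!]', 2);  # average -> 48.60
SELECT BsonGet_String('[45,28,36,45,89]', '[","]'); # concat  -> 45,28,36,45,89

These mirror the "Documentation examples" block further down, which aggregates the same array.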
+SELECT BsonGet_Real(@j1, '2'); +SELECT BsonGet_Real(@j1 json_, '3', 2); +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3'); +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]'); +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]'); +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]'); +SELECT BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0'); +SELECT BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price'); +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty'); +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price'); +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4); +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose'); +SELECT department, BsonGet_Real(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department; + +--echo # +--echo # Documentation examples +--echo # +SELECT + BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '4') "Rank", + BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[#]') "Number", + BsonGet_String(Bson_Make_Array(45,28,36,45,89), '[","]') "Concat", + BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') "Sum", + BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]', 2) "Avg"; +SELECT + BsonGet_String('{"qty":7,"price":29.50,"garanty":null}', 'price') "String", + BsonGet_Int('{"qty":7,"price":29.50,"garanty":null}', 'price') "Int", + BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price') "Real"; +SELECT BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price', 3) "Real"; + +--echo # +--echo # Testing Locate +--echo # +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin'); +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56); +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416); +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose'); +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'Jack') Path; +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'jack' ci) Path; +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"Jack", "LN":"London"}' json_) Path; +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"jack", "LN":"London"}' json_) Path; +SELECT BsonLocate('[45,28,36,45,89]',36); +SELECT BsonLocate('[45,28,36,45,89]' json_,28.0); +SELECT Bson_Locate_All('[45,28,36,45,89]',10); +SELECT Bson_Locate_All('[45,28,36,45,89]',45); +SELECT Bson_Locate_All('[[45,28],36,45,89]',45); +SELECT Bson_Locate_All('[[45,28,45],36,45,89]',45); +SELECT Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]')); +SELECT BsonLocate('[[45,28,45],36,45,89]',45,n) from t1; +SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) FROM t1; +SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) AS `Path` FROM t1 GROUP BY n HAVING `Path` IS NOT NULL; +SELECT Bson_Locate_All('[45,28,[36,45,89]]',45); +SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0)); +SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0); +SELECT 
BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_); +SELECT BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_); +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','45') "All paths"; +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_); +SELECT BsonGet_Int(Bson_Locate_All('[[45,28],[[36,45],89]]',45), '[#]') "Nb of occurs"; +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]',45,2); +SELECT BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0'); +SELECT BsonLocate(Bson_File('test/biblio.json'), 'Knab'); +SELECT Bson_Locate_All('test/biblio.json' jfile_, 'Knab'); + +--echo # +--echo # Testing json files +--echo # +SELECT Bfile_Make('[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]}, +{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]}, +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]}, +{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]', 'test/fx.json', 0) AS NewFile; +SELECT Bfile_Make('test/fx.json', 1); +SELECT Bfile_Make('test/fx.json' jfile_); +SELECT Bfile_Make(Bbin_File('test/fx.json'), 0); +SELECT Bson_File('test/fx.json', 1); +SELECT Bson_File('test/fx.json', 2); +SELECT Bson_File('test/fx.json', 0); +SELECT Bson_File('test/fx.json', '0'); +SELECT Bson_File('test/fx.json', '[?]'); +SELECT BsonGet_String(Bson_File('test/fx.json'), '1.*'); +SELECT BsonGet_String(Bson_File('test/fx.json'), '1'); +SELECT BsonGet_Int(Bson_File('test/fx.json'), '1.mileage') AS Mileage; +SELECT BsonGet_Real(Bson_File('test/fx.json'), '0.price', 2) AS Price; +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings'); +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings'); +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1); +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0); +SELECT Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1); +SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin); +SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size'); +SELECT Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size'); +SELECT Bson_Object_List(Bson_File('test/fx.json', '3.size')); + +--echo # +--echo # Testing new functions +--echo # +SELECT Bson_Item_Merge('["a","b","c"]','["d","e","f"]') as "Result"; +SELECT Bson_Item_Merge(Bson_Make_Array('a','b','c'), Bson_Make_Array('d','e','f')) as "Result"; +SELECT +Bson_Set_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Set", +Bson_Insert_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Insert", +Bson_Update_Item(Bson_Make_Array(1,2,3,Bson_Object_Key('quatre',4)),'foo','$[1]',5,'$[3].cinq') "Update"; +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux'); +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]'); +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux'); +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +SELECT BsonSet_Grp_Size(10); + +# +# Clean up +# +--source bson_udf2.inc +--remove_file $MYSQLD_DATADIR/test/biblio.json +--remove_file $MYSQLD_DATADIR/test/employee.dat +--remove_file $MYSQLD_DATADIR/test/fx.json + diff --git a/storage/connect/mysql-test/connect/t/bson_udf2.inc b/storage/connect/mysql-test/connect/t/bson_udf2.inc new file mode 100644 index 00000000000..d06d7fac435 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_udf2.inc @@ -0,0 +1,63 @@ +--disable_query_log + +DROP FUNCTION bson_test; +DROP 
FUNCTION bsonvalue; +DROP FUNCTION bson_make_array; +DROP FUNCTION bson_array_add_values; +DROP FUNCTION bson_array_add; +DROP FUNCTION bson_array_delete; +DROP FUNCTION bson_make_object; +DROP FUNCTION bson_object_nonull; +DROP FUNCTION bson_object_key; +DROP FUNCTION bson_object_add; +DROP FUNCTION bson_object_delete; +DROP FUNCTION bson_object_list; +DROP FUNCTION bson_object_values; +DROP FUNCTION bsonset_def_prec; +DROP FUNCTION bsonget_def_prec; +DROP FUNCTION bsonset_grp_size; +DROP FUNCTION bsonget_grp_size; +DROP FUNCTION bson_array_grp; +DROP FUNCTION bson_object_grp; +DROP FUNCTION bsonlocate; +DROP FUNCTION bson_locate_all; +DROP FUNCTION bson_contains; +DROP FUNCTION bsoncontains_path; +DROP FUNCTION bson_item_merge; +DROP FUNCTION bson_get_item; +DROP FUNCTION bson_delete_item; +DROP FUNCTION bsonget_string; +DROP FUNCTION bsonget_int; +DROP FUNCTION bsonget_real; +DROP FUNCTION bson_set_item; +DROP FUNCTION bson_insert_item; +DROP FUNCTION bson_update_item; +DROP FUNCTION bson_serialize; +DROP FUNCTION bson_file; +DROP FUNCTION bfile_make; +DROP FUNCTION bfile_convert; +DROP FUNCTION bfile_bjson; +DROP FUNCTION bbin_make_array; +DROP FUNCTION bbin_array_add; +DROP FUNCTION bbin_array_add_values; +DROP FUNCTION bbin_array_delete; +DROP FUNCTION bbin_array_grp; +DROP FUNCTION bbin_object_grp; +DROP FUNCTION bbin_make_object; +DROP FUNCTION bbin_object_nonull; +DROP FUNCTION bbin_object_key; +DROP FUNCTION bbin_object_add; +DROP FUNCTION bbin_object_delete; +DROP FUNCTION bbin_object_list; +DROP FUNCTION bbin_object_values; +DROP FUNCTION bbin_get_item; +DROP FUNCTION bbin_set_item; +DROP FUNCTION bbin_insert_item; +DROP FUNCTION bbin_update_item; +DROP FUNCTION bbin_item_merge; +DROP FUNCTION bbin_delete_item; +DROP FUNCTION bbin_locate_all; +DROP FUNCTION bbin_file; + +--enable_query_log + diff --git a/storage/connect/mysql-test/connect/t/ini_grant.result b/storage/connect/mysql-test/connect/t/ini_grant.result deleted file mode 100644 index 96d5e192c7d..00000000000 --- a/storage/connect/mysql-test/connect/t/ini_grant.result +++ /dev/null @@ -1,89 +0,0 @@ -# -# Checking FILE privileges -# -set sql_mode=""; -GRANT ALL PRIVILEGES ON *.* TO user@localhost; -REVOKE FILE ON *.* FROM user@localhost; -set sql_mode=default; -connect user,localhost,user,,; -connection user; -SELECT user(); -user() -user@localhost -CREATE TABLE t1 (sec CHAR(10) NOT NULL FLAG=1, val CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=INI; -Warnings: -Warning 1105 No file name. 
Table will use t1.ini -INSERT INTO t1 VALUES ('sec1','val1'); -SELECT * FROM t1; -sec val -sec1 val1 -UPDATE t1 SET val='val11'; -SELECT * FROM t1; -sec val -sec1 val11 -DELETE FROM t1; -SELECT * FROM t1; -sec val -INSERT INTO t1 VALUES('sec2','val2'); -TRUNCATE TABLE t1; -SELECT * FROM t1; -sec val -CREATE VIEW v1 AS SELECT * FROM t1; -SELECT * FROM v1; -sec val -DROP VIEW v1; -DROP TABLE t1; -CREATE TABLE t1 (sec CHAR(10) NOT NULL FLAG=1, val CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=INI FILE_NAME='t1.EXT'; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -connection default; -SELECT user(); -user() -root@localhost -CREATE TABLE t1 (sec CHAR(10) NOT NULL FLAG=1, val CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=INI FILE_NAME='t1.EXT'; -INSERT INTO t1 VALUES ('sec1','val1'); -connection user; -SELECT user(); -user() -user@localhost -INSERT INTO t1 VALUES ('sec2','val2'); -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -SELECT * FROM t1; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -UPDATE t1 SET val='val11'; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -DELETE FROM t1; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -TRUNCATE TABLE t1; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -ALTER TABLE t1 READONLY=1; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -DROP TABLE t1; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -CREATE VIEW v1 AS SELECT * FROM t1; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -# Testing a VIEW created with FILE privileges but accessed with no FILE -connection default; -SELECT user(); -user() -root@localhost -CREATE SQL SECURITY INVOKER VIEW v1 AS SELECT * FROM t1; -connection user; -SELECT user(); -user() -user@localhost -SELECT * FROM v1; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -INSERT INTO v1 VALUES ('sec3','val3'); -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -UPDATE v1 SET val='val11'; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -DELETE FROM v1; -ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation -disconnect user; -connection default; -DROP VIEW v1; -DROP TABLE t1; -DROP USER user@localhost; -# -# Checking FILE privileges: done -# diff --git a/storage/connect/mysql-test/connect/t/jdbc_oracle.test b/storage/connect/mysql-test/connect/t/jdbc_oracle.test index 10cb7a7b77d..1316352d4f5 100644 --- a/storage/connect/mysql-test/connect/t/jdbc_oracle.test +++ b/storage/connect/mysql-test/connect/t/jdbc_oracle.test @@ -8,20 +8,20 @@ CREATE TABLE t2 ( number int(5) not null flag=1, message varchar(255) flag=2) ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager,Execsrc=1'; +OPTION_LIST='User=system,Password=Choupy01,Execsrc=1'; SELECT * FROM t2 WHERE command = 'drop table employee'; SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary number(8,2))'; SELECT * FROM t2 WHERE command = "insert into 
employee values(4567,'Johnson', 'Engineer', 12560.50)"; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager'; +OPTION_LIST='User=system,Password=Choupy01'; SELECT * FROM t1 WHERE table_name='employee'; DROP TABLE t1; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager'; +OPTION_LIST='User=system,Password=Choupy01'; SELECT * FROM t1; DROP TABLE t1; @@ -32,7 +32,7 @@ CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OP HOST 'jdbc:oracle:thin:@localhost:1521:xe', DATABASE 'SYSTEM', USER 'system', -PASSWORD 'manager', +PASSWORD 'Choupy01', PORT 0, SOCKET '', OWNER 'SYSTEM'); diff --git a/storage/connect/mysql-test/connect/t/json.test b/storage/connect/mysql-test/connect/t/json.test index 018489525f7..8b42ef9cfab 100644 --- a/storage/connect/mysql-test/connect/t/json.test +++ b/storage/connect/mysql-test/connect/t/json.test @@ -35,15 +35,15 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), - Language CHAR(2) FIELD_FORMAT='$.LANG', - Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', - Authors INT(2) FIELD_FORMAT='$.AUTHOR[#]', - Title CHAR(32) FIELD_FORMAT='$.TITLE', - Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', - Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', - Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', - Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', - Year int(4) FIELD_FORMAT='$.DATEPUB' + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + Authors INT(2) JPATH='$.AUTHOR[#]', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -55,16 +55,16 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), - Language CHAR(2) FIELD_FORMAT='$.LANG', - Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', - AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].FIRSTNAME', - AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].LASTNAME', - Title CHAR(32) FIELD_FORMAT='$.TITLE', - Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', - Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', - Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', - Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', - Year int(4) FIELD_FORMAT='$.DATEPUB' + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -76,16 +76,16 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), - Language CHAR(2) FIELD_FORMAT='$.LANG', - Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', - AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME', - AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME', - Title CHAR(32) FIELD_FORMAT='$.TITLE', - Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', - Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', - Publisher CHAR(20) 
FIELD_FORMAT='$.PUBLISHER.NAME', - Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', - Year int(4) FIELD_FORMAT='$.DATEPUB' + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -122,17 +122,17 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15) NOT NULL, - Language CHAR(2) FIELD_FORMAT='$.LANG', - Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', - AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME', - AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME', - Title CHAR(32) FIELD_FORMAT='$.TITLE', - Translation CHAR(32) FIELD_FORMAT='$.TRANSLATED.PREFIX', - TranslatorFN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.FIRSTNAME', - TranslatorLN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.LASTNAME', - Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', - Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', - Year int(4) FIELD_FORMAT='$.DATEPUB', + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', + TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', + TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB', INDEX IX(ISBN) ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; @@ -148,9 +148,9 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[].EXPENSE["+"].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[].EXPENSE[+].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; DROP TABLE t1; @@ -160,9 +160,9 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; #--error ER_GET_ERRMSG SELECT * FROM t1; @@ -173,14 +173,14 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( WHO CHAR(12) NOT NULL, -WEEKS CHAR(12) NOT NULL FIELD_FORMAT='$.WEEK[", "].NUMBER', -SUMS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[+].AMOUNT', -SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[+].AMOUNT', -AVGS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[!].AMOUNT', -SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[!].AMOUNT', -AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[+].AMOUNT', -AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[!].AMOUNT', -AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[*].AMOUNT') 
+WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; DROP TABLE t1; @@ -190,25 +190,25 @@ DROP TABLE t1; --echo # CREATE TABLE t2 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[0].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t2; CREATE TABLE t3 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[1].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t3; CREATE TABLE t4 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[2].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t4; @@ -230,24 +230,24 @@ DROP TABLE t1, t2, t3, t4; CREATE TABLE t2 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json'; SELECT * FROM t2; CREATE TABLE t3 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json'; SELECT * FROM t3; CREATE TABLE t4 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json'; SELECT * FROM t4; @@ -257,8 +257,8 @@ SELECT * FROM t4; CREATE TABLE t1 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1; SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; DROP TABLE t1; @@ -269,8 +269,8 @@ DROP TABLE t1; CREATE TABLE t1 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', 
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json'; ALTER TABLE t1 PARTITION BY LIST COLUMNS(WEEK) ( diff --git a/storage/connect/mysql-test/connect/t/json_java_2.test b/storage/connect/mysql-test/connect/t/json_java_2.test index 2f64d8e2eed..03202828bb1 100644 --- a/storage/connect/mysql-test/connect/t/json_java_2.test +++ b/storage/connect/mysql-test/connect/t/json_java_2.test @@ -3,6 +3,7 @@ --disable_query_log eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo2.jar'; +set connect_json_all_path=0; --enable_query_log let $DRV= Java; let $VERS= 2; diff --git a/storage/connect/mysql-test/connect/t/json_java_3.test b/storage/connect/mysql-test/connect/t/json_java_3.test index cee8343772a..238808a833f 100644 --- a/storage/connect/mysql-test/connect/t/json_java_3.test +++ b/storage/connect/mysql-test/connect/t/json_java_3.test @@ -3,6 +3,7 @@ --disable_query_log eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo3.jar'; +set connect_json_all_path=0; --enable_query_log let $DRV= Java; let $VERS= 3; diff --git a/storage/connect/mysql-test/connect/t/mongo_test.inc b/storage/connect/mysql-test/connect/t/mongo_test.inc index 357fa55240b..6e7c78e81ac 100644 --- a/storage/connect/mysql-test/connect/t/mongo_test.inc +++ b/storage/connect/mysql-test/connect/t/mongo_test.inc @@ -1,9 +1,10 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; --echo # --echo # Test the MONGO table type --echo # -eval CREATE TABLE t1 (Document varchar(1024) field_format='*') +eval CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants $CONN OPTION_LIST='Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -13,7 +14,7 @@ DROP TABLE t1; --echo # Test catfunc --echo # eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8 $CONN; +OPTION_LIST='Depth=1,Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8 $CONN; SELECT * from t1; DROP TABLE t1; @@ -36,7 +37,7 @@ DROP TABLE t1; --echo # Test discovery --echo # eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants -OPTION_LIST='Level=1,Driver=$DRV,Version=$VERS' $CONN DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=$DRV,Version=$VERS' $CONN DATA_CHARSET=utf8; SHOW CREATE TABLE t1; SELECT * FROM t1 LIMIT 5; DROP TABLE t1; @@ -58,12 +59,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=$DRV,Version=$VERS' $CONN; @@ -125,6 +126,10 @@ IF ($TYPE == JSON) { SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; } +IF ($TYPE == BSON) +{ +SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; +} DROP TABLE t1; 
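The mongo_test.inc hunks above, like the zip.test hunk later in this patch, rename the discovery option from Level to Depth; both set how many levels of nested documents get flattened into columns during discovery. A minimal sketch of discovery under the new spelling, with the template variables filled in from bson_mongo_c.test (Driver=C, Version=0, TYPE=BSON and the mongodb connection string are assumptions taken from that one instantiation):

CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
OPTION_LIST='Depth=1,Driver=C,Version=0'
CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;   # discovered columns should include the nested address fields
DROP TABLE t1;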
--echo # @@ -156,8 +161,8 @@ DROP TABLE t1; eval CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, - loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', - loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', + loc_0 double(12,6) NOT NULL `JPATH`='loc.0', + loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=$TYPE TABNAME='cities' @@ -181,11 +186,11 @@ DROP TABLE t1; eval CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, - prices_0 INT(6) FIELD_FORMAT='prices.0', - prices_1 INT(6) FIELD_FORMAT='prices.1', - prices_2 INT(6) FIELD_FORMAT='prices.2', - prices_3 INT(6) FIELD_FORMAT='prices.3', - prices_4 INT(6) FIELD_FORMAT='prices.4') + prices_0 INT(6) JPATH='prices.0', + prices_1 INT(6) JPATH='prices.1', + prices_2 INT(6) JPATH='prices.2', + prices_3 INT(6) JPATH='prices.3', + prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=$DRV,Version=$VERS' $CONN; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/t/odbc_oracle.test b/storage/connect/mysql-test/connect/t/odbc_oracle.test index 9de742a2647..18d29f69f1a 100644 --- a/storage/connect/mysql-test/connect/t/odbc_oracle.test +++ b/storage/connect/mysql-test/connect/t/odbc_oracle.test @@ -78,42 +78,42 @@ SET NAMES utf8; --echo # All tables in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='%.%'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='MTR.T1'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='MTR.%'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; DROP TABLE t1; @@ -127,7 +127,7 @@ DROP TABLE t1; --echo # All columns in all schemas (limited with WHERE) CREATE TABLE t1 ENGINE=CONNECT 
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns; # Disable warnings to avoid "Result limited to 20000 lines" --disable_warnings @@ -137,7 +137,7 @@ DROP TABLE t1; --echo # All columns in all schemas (limited with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.%'; # Disable warnings to avoid "Result limited to 20000 lines" --disable_warnings @@ -146,20 +146,20 @@ SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables "T1" in all schemas (limited with WHERE) -CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' CATFUNC=Columns TABNAME='%.T1'; +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='MTR.T1'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; @@ -172,7 +172,7 @@ DROP TABLE t1; --echo # Table "T1" in the default schema ("MTR") CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='T1'; SHOW CREATE TABLE t1; SELECT * FROM t1 ORDER BY A; @@ -189,7 +189,7 @@ DROP TABLE t1; --echo # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.T1'; SHOW CREATE TABLE t1; SELECT * FROM t1; @@ -197,7 +197,7 @@ DROP TABLE t1; --echo # View "V1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.V1'; SHOW CREATE TABLE t1; SELECT * FROM t1; @@ -214,7 +214,7 @@ DROP TABLE t1; --echo # Table "T2" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.T2'; SHOW CREATE TABLE t1; SELECT * FROM t1; diff --git a/storage/connect/mysql-test/connect/t/rest.inc b/storage/connect/mysql-test/connect/t/rest.inc new file mode 100644 index 00000000000..6848e4b6965 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/rest.inc @@ -0,0 +1,17 @@ +--disable_query_log +--error 0,ER_UNKNOWN_ERROR +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='users.json' +HTTP='http://jsonplaceholder.typicode.com/users'; + +if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES + WHERE 
TABLE_SCHEMA='test' AND TABLE_NAME='t1' + AND ENGINE='CONNECT' + AND CREATE_OPTIONS LIKE "%`table_type`='JSON'%"`) +{ + DROP TABLE IF EXISTS t1; + Skip Need Curl or Casablanca; +} +DROP TABLE t1; +--enable_query_log + diff --git a/storage/connect/mysql-test/connect/t/rest.test b/storage/connect/mysql-test/connect/t/rest.test new file mode 100644 index 00000000000..67066ed4639 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/rest.test @@ -0,0 +1,17 @@ +--source rest.inc + +let $MYSQLD_DATADIR= `select @@datadir`; + +--echo # +--echo # Testing REST query +--echo # +CREATE TABLE t1 +ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json' +HTTP='http://jsonplaceholder.typicode.com/users'; +SELECT * FROM t1; +DROP TABLE t1; + +# +# Clean up +# +--remove_file $MYSQLD_DATADIR/test/users.json diff --git a/storage/connect/mysql-test/connect/t/xml.test b/storage/connect/mysql-test/connect/t/xml.test index 0fdf8e90b6e..e837ec79604 100644 --- a/storage/connect/mysql-test/connect/t/xml.test +++ b/storage/connect/mysql-test/connect/t/xml.test @@ -300,6 +300,7 @@ CREATE TABLE t1 (node VARCHAR(50)) ENGINE=connect TABLE_TYPE=xml FILE_NAME='t1.xml' OPTION_LIST='xmlsup=domdoc,rownode=line,encoding=iso-8859-1'; INSERT INTO t1 VALUES (_latin1 0xC0C1C2C3); +--replace_regex /.*iso-8859-1.*/warning about characters outside of iso-8859-1/ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3); INSERT INTO t1 VALUES ('&<>"\''); SELECT node, hex(node) FROM t1; diff --git a/storage/connect/mysql-test/connect/t/xml2.test b/storage/connect/mysql-test/connect/t/xml2.test index 7bbc3dbd87c..9c5f685d399 100644 --- a/storage/connect/mysql-test/connect/t/xml2.test +++ b/storage/connect/mysql-test/connect/t/xml2.test @@ -77,9 +77,9 @@ DROP TABLE t1; --echo # Testing mixed tag and attribute values --echo # CREATE TABLE t1 ( - ISBN CHAR(15) FIELD_FORMAT='@', - LANG CHAR(2) FIELD_FORMAT='@', - SUBJECT CHAR(32) FIELD_FORMAT='@', + ISBN CHAR(15) XPATH='@', + LANG CHAR(2) XPATH='@', + SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -98,9 +98,9 @@ DROP TABLE t1; --copy_file $MTR_SUITE_DIR/std_data/xsample.xml $MYSQLD_DATADIR/test/xsample2.xml --chmod 0644 $MYSQLD_DATADIR/test/xsample2.xml CREATE TABLE t1 ( - ISBN CHAR(15) FIELD_FORMAT='@', - LANG CHAR(2) FIELD_FORMAT='@', - SUBJECT CHAR(32) FIELD_FORMAT='@', + ISBN CHAR(15) XPATH='@', + LANG CHAR(2) XPATH='@', + SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -123,18 +123,18 @@ DROP TABLE t1; --echo # Testing XPath --echo # CREATE TABLE t1 ( - isbn CHAR(15) FIELD_FORMAT='@ISBN', - language CHAR(2) FIELD_FORMAT='@LANG', - subject CHAR(32) FIELD_FORMAT='@SUBJECT', - authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME', - authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME', - title CHAR(32) FIELD_FORMAT='TITLE', - translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX', - tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME', - tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME', - publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME', - location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE', - year INT(4) FIELD_FORMAT='DATEPUB' + isbn CHAR(15) XPATH='@ISBN', + language CHAR(2) XPATH='@LANG', + subject CHAR(32) XPATH='@SUBJECT', + authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME', + authorln CHAR(20) XPATH='AUTHOR/LASTNAME', + title CHAR(32) XPATH='TITLE', + translated CHAR(32) XPATH='TRANSLATOR/@PREFIX', + tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME', + tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME', + publisher CHAR(20) 
XPATH='PUBLISHER/NAME', + location CHAR(20) XPATH='PUBLISHER/PLACE', + year INT(4) XPATH='DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2'; SELECT * FROM t1; @@ -150,8 +150,8 @@ DROP TABLE t1; #--echo # Relative paths are not supported #--echo # #CREATE TABLE t1 ( -# authorfn CHAR(20) FIELD_FORMAT='//FIRSTNAME', -# authorln CHAR(20) FIELD_FORMAT='//LASTNAME' +# authorfn CHAR(20) XPATH='//FIRSTNAME', +# authorln CHAR(20) XPATH='//LASTNAME' #) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' # TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1'; #SELECT * FROM t1; @@ -165,8 +165,8 @@ DROP TABLE t1; #--echo # Absolute path is not supported #--echo # #CREATE TABLE t1 ( -# authorfn CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/FIRSTNAME', -# authorln CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/LASTNAME' +# authorfn CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/FIRSTNAME', +# authorln CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/LASTNAME' #) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' # TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1'; #SELECT * FROM t1; @@ -178,7 +178,7 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( - isbn CHAR(15) FIELD_FORMAT='@isbn' + isbn CHAR(15) XPATH='@isbn' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2'; SELECT * FROM t1; diff --git a/storage/connect/mysql-test/connect/t/xml2_html.test b/storage/connect/mysql-test/connect/t/xml2_html.test index 1c84b46ec38..2f4fc50e5e6 100644 --- a/storage/connect/mysql-test/connect/t/xml2_html.test +++ b/storage/connect/mysql-test/connect/t/xml2_html.test @@ -11,9 +11,9 @@ SET NAMES utf8; --echo # Testing HTML like XML file --echo # CREATE TABLE beers ( -`Name` CHAR(16) FIELD_FORMAT='brandName', -`Origin` CHAR(16) FIELD_FORMAT='origin', -`Description` CHAR(32) FIELD_FORMAT='details') +`Name` CHAR(16) XPATH='brandName', +`Origin` CHAR(16) XPATH='origin', +`Description` CHAR(32) XPATH='details') ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml' TABNAME='table' OPTION_LIST='xmlsup=libxml2,rownode=tr,colnode=td'; SELECT * FROM beers; diff --git a/storage/connect/mysql-test/connect/t/xml2_mult.test b/storage/connect/mysql-test/connect/t/xml2_mult.test index cd83827fe34..e9914c71aad 100644 --- a/storage/connect/mysql-test/connect/t/xml2_mult.test +++ b/storage/connect/mysql-test/connect/t/xml2_mult.test @@ -15,9 +15,9 @@ SET NAMES utf8; --echo # Testing expanded values --echo # CREATE TABLE `bookstore` ( - `category` CHAR(16) NOT NULL FIELD_FORMAT='@', + `category` CHAR(16) NOT NULL XPATH='@', `title` VARCHAR(50) NOT NULL, - `lang` char(2) NOT NULL FIELD_FORMAT='title/@', + `lang` char(2) NOT NULL XPATH='title/@', `author` VARCHAR(24) NOT NULL, `year` INT(4) NOT NULL, `price` DOUBLE(8,2) NOT NULL) diff --git a/storage/connect/mysql-test/connect/t/xml2_zip.test b/storage/connect/mysql-test/connect/t/xml2_zip.test index d8c7894f861..df69f9dace3 100644 --- a/storage/connect/mysql-test/connect/t/xml2_zip.test +++ b/storage/connect/mysql-test/connect/t/xml2_zip.test @@ -11,26 +11,26 @@ let $MYSQLD_DATADIR= `select @@datadir`; --echo # Testing zipped XML tables --echo # CREATE TABLE t1 ( -ISBN CHAR(13) NOT NULL FIELD_FORMAT='@', -LANG CHAR(2) NOT NULL FIELD_FORMAT='@', -SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@', -AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME', -AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME', -TRANSLATOR_PREFIX CHAR(24) DEFAULT 
NULL FIELD_FORMAT='TRANSLATOR/@PREFIX', -TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME', +ISBN CHAR(13) NOT NULL XPATH='@', +LANG CHAR(2) NOT NULL XPATH='@', +SUBJECT CHAR(12) NOT NULL XPATH='@', +AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME', +AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME', +TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX', +TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME', +TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME', TITLE CHAR(30) NOT NULL, -PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME', -PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE', +PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', +PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; +OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; SELECT * FROM t1; #testing discovery CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='xmlsup=libxml2'; +OPTION_LIST='depth=0,xmlsup=libxml2'; SELECT * FROM t2; DROP TABLE t1,t2; diff --git a/storage/connect/mysql-test/connect/t/xml_html.test b/storage/connect/mysql-test/connect/t/xml_html.test index 34d29953f68..1430f68d2b2 100644 --- a/storage/connect/mysql-test/connect/t/xml_html.test +++ b/storage/connect/mysql-test/connect/t/xml_html.test @@ -11,9 +11,9 @@ SET NAMES utf8; --echo # Testing HTML like XML file --echo # CREATE TABLE beers ( -`Name` CHAR(16) FIELD_FORMAT='brandName', -`Origin` CHAR(16) FIELD_FORMAT='origin', -`Description` CHAR(32) FIELD_FORMAT='details') +`Name` CHAR(16) XPATH='brandName', +`Origin` CHAR(16) XPATH='origin', +`Description` CHAR(32) XPATH='details') ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml' TABNAME='table' OPTION_LIST='xmlsup=domdoc,rownode=tr,colnode=td'; SELECT * FROM beers; diff --git a/storage/connect/mysql-test/connect/t/xml_mult.test b/storage/connect/mysql-test/connect/t/xml_mult.test index cf703e90da4..221d6734546 100644 --- a/storage/connect/mysql-test/connect/t/xml_mult.test +++ b/storage/connect/mysql-test/connect/t/xml_mult.test @@ -15,9 +15,9 @@ SET NAMES utf8; --echo # Testing expanded values --echo # CREATE TABLE `bookstore` ( - `category` CHAR(16) NOT NULL FIELD_FORMAT='@', + `category` CHAR(16) NOT NULL XPATH='@', `title` VARCHAR(50) NOT NULL, - `lang` char(2) NOT NULL FIELD_FORMAT='title/@', + `lang` char(2) NOT NULL XPATH='title/@', `author` VARCHAR(24) NOT NULL, `year` INT(4) NOT NULL, `price` DOUBLE(8,2) NOT NULL) diff --git a/storage/connect/mysql-test/connect/t/xml_zip.test b/storage/connect/mysql-test/connect/t/xml_zip.test index ad31ca46d4c..29ee2e0e607 100644 --- a/storage/connect/mysql-test/connect/t/xml_zip.test +++ b/storage/connect/mysql-test/connect/t/xml_zip.test @@ -11,26 +11,26 @@ let $MYSQLD_DATADIR= `select @@datadir`; --echo # Testing zipped XML tables --echo # CREATE TABLE t1 ( -ISBN CHAR(13) NOT NULL FIELD_FORMAT='@', -LANG CHAR(2) NOT NULL FIELD_FORMAT='@', -SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@', -AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME', -AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME', -TRANSLATOR_PREFIX CHAR(24) 
DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX', -TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME', +ISBN CHAR(13) NOT NULL XPATH='@', +LANG CHAR(2) NOT NULL XPATH='@', +SUBJECT CHAR(12) NOT NULL XPATH='@', +AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME', +AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME', +TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX', +TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME', +TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME', TITLE CHAR(30) NOT NULL, -PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME', -PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE', +PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', +PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; +OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; SELECT * FROM t1; #testing discovery CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='xmlsup=domdoc'; +OPTION_LIST='depth=0,xmlsup=domdoc'; SELECT * FROM t2; DROP TABLE t1,t2; diff --git a/storage/connect/mysql-test/connect/t/zip.test b/storage/connect/mysql-test/connect/t/zip.test index dce68c17eee..1f0a4eedee9 100644 --- a/storage/connect/mysql-test/connect/t/zip.test +++ b/storage/connect/mysql-test/connect/t/zip.test @@ -83,37 +83,37 @@ DROP TABLE t1,t2,t3,t4; --echo # CREATE TABLE t1 ( _id INT(2) NOT NULL, -name_first CHAR(9) NOT NULL FIELD_FORMAT='$.name.first', -name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka', -name_last CHAR(10) NOT NULL FIELD_FORMAT='$.name.last', +name_first CHAR(9) NOT NULL JPATH='$.name.first', +name_aka CHAR(4) DEFAULT NULL JPATH='$.name.aka', +name_last CHAR(10) NOT NULL JPATH='$.name.last', title CHAR(12) DEFAULT NULL, birth CHAR(20) DEFAULT NULL, death CHAR(20) DEFAULT NULL, -contribs CHAR(7) NOT NULL FIELD_FORMAT='$.contribs', -awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards.award', -awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards.year', -awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards.by' +contribs CHAR(50) NOT NULL JPATH='$.contribs', +awards_award CHAR(42) DEFAULT NULL JPATH='$.awards.award', +awards_year CHAR(4) DEFAULT NULL JPATH='$.awards.year', +awards_by CHAR(38) DEFAULT NULL JPATH='$.awards.by' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES; SELECT * FROM t1; # Test discovery CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1 -OPTION_LIST='LEVEL=5'; +OPTION_LIST='DEPTH=5'; SELECT * FROM t2; CREATE TABLE t3 ( _id INT(2) NOT NULL, -firstname CHAR(9) NOT NULL FIELD_FORMAT='$.name.first', -aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka', -lastname CHAR(10) NOT NULL FIELD_FORMAT='$.name.last', +firstname CHAR(9) NOT NULL JPATH='$.name.first', +aka CHAR(4) DEFAULT NULL JPATH='$.name.aka', +lastname CHAR(10) NOT NULL JPATH='$.name.last', title CHAR(12) DEFAULT NULL, birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'", death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'", -contribs CHAR(64) NOT NULL FIELD_FORMAT='$.contribs.[", "]', -award CHAR(42) DEFAULT NULL 
FIELD_FORMAT='$.awards[*].award', -year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards[*].year', -`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards[*].by' +contribs CHAR(64) NOT NULL JPATH='$.contribs.[", "]', +award CHAR(42) DEFAULT NULL JPATH='$.awards[*].award', +year CHAR(4) DEFAULT NULL JPATH='$.awards[*].year', +`by` CHAR(38) DEFAULT NULL JPATH='$.awards[*].by' ) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES; SELECT * FROM t3 WHERE _id = 1; diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp index 89b18f86323..e53ee1310e4 100644 --- a/storage/connect/myutil.cpp +++ b/storage/connect/myutil.cpp @@ -169,10 +169,9 @@ const char *PLGtoMYSQLtype(int type, bool dbf, char v) case TYPE_BIGINT: return "BIGINT"; case TYPE_TINY: return "TINYINT"; case TYPE_DECIM: return "DECIMAL"; - default: return "CHAR(0)"; + default: return (v) ? "VARCHAR" : "CHAR"; } // endswitch mytype - return "CHAR(0)"; } // end of PLGtoMYSQLtype /************************************************************************/ diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h index a40e32bcfb2..dd204d065ed 100644 --- a/storage/connect/plgdbsem.h +++ b/storage/connect/plgdbsem.h @@ -83,7 +83,8 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */ TAB_ZIP = 27, /* ZIP file info table */ TAB_MONGO = 28, /* Table retrieved from MongoDB */ TAB_REST = 29, /* Table retrieved from Rest */ - TAB_NIY = 30}; /* Table not implemented yet */ + TAB_BSON = 30, /* BSON Table (development) */ + TAB_NIY = 31}; /* Table not implemented yet */ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */ TYPE_AM_ROWID = 1, /* ROWID type (special column) */ @@ -160,7 +161,7 @@ enum RECFM {RECFM_DFLT = 0, /* Default table type */ RECFM_FMT = 8, /* FMT formatted file */ RECFM_VCT = 9, /* VCT formatted files */ RECFM_XML = 10, /* XML formatted files */ - RECFM_JASON = 11, /* JASON formatted files */ + RECFM_JSON = 11, /* JSON formatted files */ RECFM_DIR = 12, /* DIR table */ RECFM_ODBC = 13, /* Table accessed via ODBC */ RECFM_JDBC = 14, /* Table accessed via JDBC */ diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index e45feb31bea..f2887987c3f 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -96,7 +96,7 @@ char *msglang(void); typedef struct { ushort Segsize; ushort Size; - } AREASIZE; +} AREASIZE; ACTIVITY defActivity = { /* Describes activity and language */ NULL, /* Points to user work area(s) */ @@ -184,7 +184,7 @@ PGLOBAL PlugInit(LPCSTR Language, size_t worksize) /***********************************************************************/ /* PlugExit: Terminate Plug operations. */ /***********************************************************************/ -int PlugExit(PGLOBAL g) +PGLOBAL PlugExit(PGLOBAL g) { if (g) { PDBUSER dup = PlgGetUser(g); @@ -196,7 +196,7 @@ int PlugExit(PGLOBAL g) delete g; } // endif g - return 0; + return NULL; } // end of PlugExit /***********************************************************************/ @@ -204,7 +204,7 @@ int PlugExit(PGLOBAL g) /* Note: this routine is not really implemented for Unix. 
*/ /***********************************************************************/ LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName) - { +{ #if defined(__WIN__) char drive[_MAX_DRIVE]; #else @@ -228,8 +228,7 @@ LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName) htrc("buff='%-.256s'\n", pBuff); return pBuff; - } // end of PlugRemoveType - +} // end of PlugRemoveType BOOL PlugIsAbsolutePath(LPCSTR path) { @@ -246,7 +245,7 @@ BOOL PlugIsAbsolutePath(LPCSTR path) /* Note: this routine is not really implemented for Unix. */ /***********************************************************************/ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) - { +{ char newname[_MAX_PATH]; char direc[_MAX_DIR], defdir[_MAX_DIR], tmpdir[_MAX_DIR]; char fname[_MAX_FNAME]; @@ -347,14 +346,14 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) } else return FileName; // Error, return unchanged name - } // end of PlugSetPath +} // end of PlugSetPath #if defined(XMSG) /***********************************************************************/ /* PlugGetMessage: get a message from the message file. */ /***********************************************************************/ char *PlugReadMessage(PGLOBAL g, int mid, char *m) - { +{ char msgfile[_MAX_PATH], msgid[32], buff[256]; char *msg; FILE *mfile = NULL; @@ -378,9 +377,9 @@ char *PlugReadMessage(PGLOBAL g, int mid, char *m) if (atoi(buff) == mid) break; - if (sscanf(buff, " %*d %-.256s \"%[^\"]", msgid, stmsg) < 2) { + if (sscanf(buff, " %*d %.31s \"%.255[^\"]", msgid, stmsg) < 2) { // Old message file - if (!sscanf(buff, " %*d \"%[^\"]", stmsg)) { + if (!sscanf(buff, " %*d \"%.255[^\"]", stmsg)) { sprintf(stmsg, "Bad message file for %d %-.256s", mid, SVP(m)); goto fin; } else @@ -405,14 +404,14 @@ char *PlugReadMessage(PGLOBAL g, int mid, char *m) msg = stmsg; return msg; - } // end of PlugReadMessage +} // end of PlugReadMessage #elif defined(NEWMSG) /***********************************************************************/ /* PlugGetMessage: get a message from the resource string table. */ /***********************************************************************/ char *PlugGetMessage(PGLOBAL g, int mid) - { +{ char *msg; #if 0 // was !defined(UNIX) && !defined(UNIV_LINUX) @@ -440,7 +439,7 @@ char *PlugGetMessage(PGLOBAL g, int mid) msg = stmsg; return msg; - } // end of PlugGetMessage +} // end of PlugGetMessage #endif // NEWMSG #if defined(__WIN__) @@ -448,13 +447,13 @@ char *PlugGetMessage(PGLOBAL g, int mid) /* Return the line length of the console screen buffer. */ /***********************************************************************/ short GetLineLength(PGLOBAL g) - { +{ CONSOLE_SCREEN_BUFFER_INFO coninfo; HANDLE hcons = GetStdHandle(STD_OUTPUT_HANDLE); BOOL b = GetConsoleScreenBufferInfo(hcons, &coninfo); return (b) ? 
coninfo.dwSize.X : 0;
- } // end of GetLineLength
+} // end of GetLineLength
 #endif   // __WIN__
 
 /***********************************************************************/
@@ -475,17 +474,19 @@ bool AllocSarea(PGLOBAL g, size_t size)
   if (!g->Sarea) {
     sprintf(g->Message, MSG(MALLOC_ERROR), "malloc");
     g->Sarea_Size = 0;
-  } else
-    g->Sarea_Size = size;
+  } else {
+    g->Sarea_Size = size;
+    PlugSubSet(g->Sarea, size);
+  } // endif Sarea
 
 #if defined(DEVELOPMENT)
   if (true) {
 #else
   if (trace(8)) {
 #endif
-    if (g->Sarea)
+    if (g->Sarea) {
       htrc("Work area of %zd allocated at %p\n", size, g->Sarea);
-    else
+    } else
       htrc("SareaAlloc: %-.256s\n", g->Message);
   } // endif trace
@@ -526,13 +527,13 @@ void FreeSarea(PGLOBAL g)
 /* the address and size not larger than memory size.                  */
 /***********************************************************************/
 BOOL PlugSubSet(void *memp, size_t size)
-  {
+{
   PPOOLHEADER pph = (PPOOLHEADER)memp;
 
   pph->To_Free = (size_t)sizeof(POOLHEADER);
   pph->FreeBlk = size - pph->To_Free;
   return FALSE;
-  } /* end of PlugSubSet */
+} /* end of PlugSubSet */
 
 /***********************************************************************/
 /* Use it to export a function that do throwing.                       */
 /***********************************************************************/
@@ -595,7 +596,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
 /* Program for sub-allocating and copying a string in a storage area. */
 /***********************************************************************/
 char *PlugDup(PGLOBAL g, const char *str)
-  {
+{
   if (str) {
     char *sm = (char*)PlugSubAlloc(g, NULL, strlen(str) + 1);
@@ -604,6 +605,33 @@ char *PlugDup(PGLOBAL g, const char *str)
   } else
     return NULL;
 
-  } // end of PlugDup
+} // end of PlugDup
+
+/*************************************************************************/
+/* This routine makes a pointer from an offset into a memory area.      */
+/*************************************************************************/
+void* MakePtr(void* memp, size_t offset)
+{
+  // return ((offset == 0) ? NULL : &((char*)memp)[offset]);
+  return (!offset) ? NULL : (char *)memp + offset;
+} /* end of MakePtr */
+
+/*************************************************************************/
+/* This routine makes an offset from a pointer (new format).            */
+/*************************************************************************/
+size_t MakeOff(void* memp, void* ptr)
+{
+  if (ptr) {
+#if defined(_DEBUG) || defined(DEVELOPMENT)
+    if (ptr <= memp) {
+      fprintf(stderr, "ptr %p <= memp %p\n", ptr, memp);
+      DoThrow(999);
+    } // endif ptr
+#endif // _DEBUG || DEVELOPMENT
+    return (size_t)(((char*)ptr) - ((char*)memp));
+  } else
+    return 0;
+
+} /* end of MakeOff */
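For illustration only (an editorial sketch, not part of the patch): MakeOff turns a pointer inside the work area into a base-relative offset and MakePtr turns it back, with 0 and NULL reserved as the null marker; this round trip is what lets the new BSON code keep relocatable trees inside the area. Only routines visible in this file are used; the size and string are arbitrary:

  PGLOBAL g = PlugInit(NULL, 16384);         // work area; Sarea is sub-set
  char   *s = PlugDup(g, "hello");           // sub-allocated inside g->Sarea
  size_t  o = MakeOff(g->Sarea, s);          // offset relative to the base
  char   *p = (char*)MakePtr(g->Sarea, o);   // yields the same pointer as s
  g = PlugExit(g);                           // cleanup; now returns NULL

-/*--------------------- End of PLUGUTIL program -----------------------*/
+/*---------------------- End of PLUGUTIL program ------------------------*/
diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp
new file mode 100644
index 00000000000..db63b8e78db
--- /dev/null
+++ b/storage/connect/tabbson.cpp
@@ -0,0 +1,2562 @@
+/************* tabbson C++ Program Source Code File (.CPP) *************/
+/* PROGRAM NAME: tabbson     Version 1.0                               */
+/* (C) Copyright to the author Olivier BERTRAND          2020          */
+/* This program implements the BSON class DB execution routines.       */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant sections of the MariaDB header file. 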
*/
+/***********************************************************************/
+#include <my_global.h>
+
+/***********************************************************************/
+/* Include application header files:                                   */
+/* global.h    is header containing all global declarations.           */
+/* plgdbsem.h  is header containing the DB application declarations.   */
+/* tdbdos.h    is header containing the TDBDOS declarations.           */
+/* json.h      is header containing the JSON classes declarations.     */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "maputil.h"
+#include "filamtxt.h"
+#include "tabdos.h"
+#include "tabbson.h"
+#include "filamap.h"
+#if defined(GZ_SUPPORT)
+#include "filamgz.h"
+#endif   // GZ_SUPPORT
+#if defined(ZIP_SUPPORT)
+#include "filamzip.h"
+#endif   // ZIP_SUPPORT
+#if defined(JAVA_SUPPORT)
+#include "jmgfam.h"
+#endif   // JAVA_SUPPORT
+#if defined(CMGO_SUPPORT)
+#include "cmgfam.h"
+#endif   // CMGO_SUPPORT
+#include "tabmul.h"
+#include "checklvl.h"
+#include "resource.h"
+#include "mycat.h"                             // for FNC_COL
+
+/***********************************************************************/
+/* This should be an option.                                           */
+/***********************************************************************/
+#define MAXCOL          200        /* Default max column nb in result  */
+//#define TYPE_UNKNOWN   12        /* Must be greater than other types */
+
+/***********************************************************************/
+/* External functions.                                                 */
+/***********************************************************************/
+USETEMP UseTemp(void);
+bool    JsonAllPath(void);
+int     GetDefaultDepth(void);
+char   *GetJsonNull(void);
+
+/***********************************************************************/
+/* BSONColumns: construct the result blocks containing the description */
+/* of all the columns of a table contained inside a JSON file.         */
+/***********************************************************************/
+PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
+{
+  static int  buftyp[] = { TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT,
+                           TYPE_INT, TYPE_SHORT, TYPE_SHORT, TYPE_STRING };
+  static XFLD fldtyp[] = { FLD_NAME, FLD_TYPE, FLD_TYPENAME, FLD_PREC,
+                           FLD_LENGTH, FLD_SCALE, FLD_NULL, FLD_FORMAT };
+  static unsigned int length[] = { 0, 6, 8, 10, 10, 6, 6, 0 };
+  int       i, n = 0;
+  int       ncol = sizeof(buftyp) / sizeof(int);
+  PJCL      jcp;
+  BSONDISC* pjdc = NULL;
+  PQRYRES   qrp;
+  PCOLRES   crp;
+
+  if (info) {
+    length[0] = 128;
+    length[7] = 256;
+    goto skipit;
+  } // endif info
+
+  if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
+    strcpy(g->Message, "Cannot find column definition for multiple table");
+    return NULL;
+  } // endif Multiple
+
+  pjdc = new(g) BSONDISC(g, length);
+
+  if (!(n = pjdc->GetColumns(g, db, dsn, topt)))
+    return NULL;
+
+skipit:
+  if (trace(1))
+    htrc("BSONColumns: n=%d len=%d\n", n, length[0]);
+
+  /*********************************************************************/
+  /* Allocate the structures used to refer to the result set.          */
+  /*********************************************************************/
+  qrp = PlgAllocResult(g, ncol, n, IDS_COLUMNS + 3,
+    buftyp, fldtyp, length, false, false);
+
+  if (!qrp)                   // Check before dereferencing Colresp
+    return NULL;
+
+  crp = qrp->Colresp->Next->Next->Next->Next->Next->Next;
+  crp->Name = PlugDup(g, "Nullable");
+  crp->Next->Name = PlugDup(g, "Jpath");
+
+  if (info)
+    return qrp;
+
+  qrp->Nblin = n;
+
+  /*********************************************************************/
+  /* Now get the results into blocks. 
*/ + /*********************************************************************/ + for (i = 0, jcp = pjdc->fjcp; jcp; i++, jcp = jcp->Next) { + if (jcp->Type == TYPE_UNKNOWN) + jcp->Type = TYPE_STRG; // Void column + + crp = qrp->Colresp; // Column Name + crp->Kdata->SetValue(jcp->Name, i); + crp = crp->Next; // Data Type + crp->Kdata->SetValue(jcp->Type, i); + crp = crp->Next; // Type Name + crp->Kdata->SetValue(GetTypeName(jcp->Type), i); + crp = crp->Next; // Precision + crp->Kdata->SetValue(jcp->Len, i); + crp = crp->Next; // Length + crp->Kdata->SetValue(jcp->Len, i); + crp = crp->Next; // Scale (precision) + crp->Kdata->SetValue(jcp->Scale, i); + crp = crp->Next; // Nullable + crp->Kdata->SetValue(jcp->Cbn ? 1 : 0, i); + crp = crp->Next; // Field format + + if (crp->Kdata) + crp->Kdata->SetValue(jcp->Fmt, i); + + } // endfor i + +/*********************************************************************/ +/* Return the result pointer. */ +/*********************************************************************/ + return qrp; +} // end of BSONColumns + +/* -------------------------- Class BSONDISC ------------------------- */ + +/***********************************************************************/ +/* Class used to get the columns of a JSON table. */ +/***********************************************************************/ +BSONDISC::BSONDISC(PGLOBAL g, uint* lg) +{ + length = lg; + jcp = fjcp = pjcp = NULL; + tdp = NULL; + tjnp = NULL; + jpp = NULL; + tjsp = NULL; + jsp = NULL; + bp = NULL; + row = NULL; + sep = NULL; + i = n = bf = ncol = lvl = sz = limit = 0; + all = strfy = false; +} // end of BSONDISC constructor + +int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) +{ + char filename[_MAX_PATH]; + bool mgo = (GetTypeID(topt->type) == TAB_MONGO); + PBVAL bdp = NULL; + + lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth()); + lvl = GetIntegerTableOption(g, topt, "Depth", lvl); + sep = GetStringTableOption(g, topt, "Separator", "."); + sz = GetIntegerTableOption(g, topt, "Jsize", 1024); + limit = GetIntegerTableOption(g, topt, "Limit", 10); + strfy = GetBooleanTableOption(g, topt, "Stringify", false); + + /*********************************************************************/ + /* Open the input file. */ + /*********************************************************************/ + tdp = new(g) BSONDEF; + tdp->G = NULL; +#if defined(ZIP_SUPPORT) + tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL); + tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false); +#endif // ZIP_SUPPORT + tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL); + + if (!(tdp->Database = SetPath(g, db))) + return 0; + + tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); + tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0; + tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2); + tdp->Xcol = GetStringTableOption(g, topt, "Expand", NULL); + tdp->Accept = GetBooleanTableOption(g, topt, "Accept", false); + tdp->Uri = (dsn && *dsn ? 
dsn : NULL); + + if (!tdp->Fn && !tdp->Uri) { + strcpy(g->Message, MSG(MISSING_FNAME)); + return 0; + } // endif Fn + + if (tdp->Fn) { + // We used the file name relative to recorded datapath + PlugSetPath(filename, tdp->Fn, tdp->GetPath()); + tdp->Fn = PlugDup(g, filename); + } // endif Fn + + if (trace(1)) + htrc("File %s objname=%s pretty=%d lvl=%d\n", + tdp->Fn, tdp->Objname, tdp->Pretty, lvl); + + if (tdp->Uri) { +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) + tdp->Collname = GetStringTableOption(g, topt, "Name", NULL); + tdp->Collname = GetStringTableOption(g, topt, "Tabname", tdp->Collname); + tdp->Schema = GetStringTableOption(g, topt, "Dbname", "test"); + tdp->Options = (PSZ)GetStringTableOption(g, topt, "Colist", "all"); + tdp->Pipe = GetBooleanTableOption(g, topt, "Pipeline", false); + tdp->Driver = (PSZ)GetStringTableOption(g, topt, "Driver", NULL); + tdp->Version = GetIntegerTableOption(g, topt, "Version", 3); + tdp->Wrapname = (PSZ)GetStringTableOption(g, topt, "Wrapper", + (tdp->Version == 2) ? "Mongo2Interface" : "Mongo3Interface"); + tdp->Pretty = 0; +#else // !MONGO_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); + return 0; +#endif // !MONGO_SUPPORT + } // endif Uri + + if (tdp->Pretty == 2) { + tdp->G = g; + + if (tdp->Zipped) { +#if defined(ZIP_SUPPORT) + tjsp = new(g) TDBBSON(g, tdp, new(g) UNZFAM(tdp)); +#else // !ZIP_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return 0; +#endif // !ZIP_SUPPORT + } else + tjsp = new(g) TDBBSON(g, tdp, new(g) MAPFAM(tdp)); + + if (tjsp->MakeDocument(g)) + return 0; + + bp = tjsp->Bp; +// bdp = tjsp->GetDoc() ? bp->GetBson(tjsp->GetDoc()) : NULL; + bdp = tjsp->GetDoc(); + jsp = bdp ? bp->GetArrayValue(bdp, 0) : NULL; + } else { + if (!((tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))) { + if (!mgo) { + sprintf(g->Message, "LRECL must be specified for pretty=%d", tdp->Pretty); + return 0; + } else + tdp->Lrecl = 8192; // Should be enough + + } // endif Lrecl + + // Allocate the parse work memory + tdp->G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 
4 : 2));
+    tdp->Ending = GetIntegerTableOption(g, topt, "Ending", CRLF);
+
+    if (tdp->Zipped) {
+#if defined(ZIP_SUPPORT)
+      tjnp = new(g) TDBBSN(g, tdp, new(g) UNZFAM(tdp));
+#else   // !ZIP_SUPPORT
+      sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+      return 0;
+#endif  // !ZIP_SUPPORT
+    } else if (tdp->Uri) {
+      if (tdp->Driver && toupper(*tdp->Driver) == 'C') {
+#if defined(CMGO_SUPPORT)
+        tjnp = new(g) TDBBSN(g, tdp, new(g) CMGFAM(tdp));
+#else
+        sprintf(g->Message, "Mongo %s Driver not available", "C");
+        return 0;
+#endif
+      } else if (tdp->Driver && toupper(*tdp->Driver) == 'J') {
+#if defined(JAVA_SUPPORT)
+        tjnp = new(g) TDBBSN(g, tdp, new(g) JMGFAM(tdp));
+#else
+        sprintf(g->Message, "Mongo %s Driver not available", "Java");
+        return 0;
+#endif
+      } else {             // Driver not specified
+#if defined(CMGO_SUPPORT)
+        tjnp = new(g) TDBBSN(g, tdp, new(g) CMGFAM(tdp));
+#elif defined(JAVA_SUPPORT)
+        tjnp = new(g) TDBBSN(g, tdp, new(g) JMGFAM(tdp));
+#else
+        sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO");
+        return 0;
+#endif
+      } // endif Driver
+
+    } else if (tdp->Pretty >= 0)
+      tjnp = new(g) TDBBSN(g, tdp, new(g) DOSFAM(tdp));
+    else
+      tjnp = new(g) TDBBSN(g, tdp, new(g) BINFAM(tdp));
+
+    tjnp->SetMode(MODE_READ);
+    bp = tjnp->Bp;
+
+    if (tjnp->OpenDB(g))
+      return 0;
+
+    switch (tjnp->ReadDB(g)) {
+    case RC_EF:
+      strcpy(g->Message, "Void json table");
+      // fall through
+    case RC_FX:
+      goto err;
+    default:
+      jsp = tjnp->Row;
+    } // endswitch ReadDB
+
+  } // endif pretty
+
+  if (!(row = (jsp) ? bp->GetObject(jsp) : NULL)) {
+    strcpy(g->Message, "Can only retrieve columns from object rows");
+    goto err;
+  } // endif row
+
+  all = GetBooleanTableOption(g, topt, "Fullarray", false);
+  jcol.Name = jcol.Fmt = NULL;
+  jcol.Next = NULL;
+  jcol.Found = true;
+  colname[0] = 0;
+
+  if (!tdp->Uri) {
+    fmt[0] = '$';
+    fmt[1] = '.';
+    bf = 2;
+  } // endif Uri
+
+  /*********************************************************************/
+  /* Analyse the JSON tree and define columns.                          */
+  /*********************************************************************/
+  for (i = 1; ; i++) {
+    for (jpp = row; jpp; jpp = bp->GetNext(jpp)) {
+      strncpy(colname, bp->GetKey(jpp), 64);
+      fmt[bf] = 0;
+
+      if (Find(g, bp->GetVlp(jpp), colname, MY_MIN(lvl, 0)))
+        goto err;
+
+    } // endfor jpp
+
+    // Missing column can be null
+    for (jcp = fjcp; jcp; jcp = jcp->Next) {
+      jcp->Cbn |= !jcp->Found;
+      jcp->Found = false;
+    } // endfor jcp
+
+    if (tdp->Pretty != 2) {
+      // Read next record
+      switch (tjnp->ReadDB(g)) {
+      case RC_EF:
+        jsp = NULL;
+        break;
+      case RC_FX:
+        goto err;
+      default:
+        jsp = tjnp->Row;
+      } // endswitch ReadDB
+
+    } else
+      jsp = bp->GetArrayValue(bdp, i);
+
+    if (!(row = (jsp) ? 
bp->GetObject(jsp) : NULL)) + break; + + } // endfor i + + if (tdp->Pretty != 2) + tjnp->CloseDB(g); + + return n; + +err: + if (tdp->Pretty != 2) + tjnp->CloseDB(g); + + return 0; +} // end of GetColumns + +bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) +{ + char *p, *pc = colname + strlen(colname), buf[32]; + int ars; + size_t n; + PBVAL job; + PBVAL jar; + + if (jvp && !bp->IsJson(jvp)) { + if (JsonAllPath() && !fmt[bf]) + strcat(fmt, colname); + + jcol.Type = (JTYP)jvp->Type; + + switch (jvp->Type) { + case TYPE_STRG: + case TYPE_DTM: + jcol.Len = (int)strlen(bp->GetString(jvp)); + break; + case TYPE_INTG: + case TYPE_BINT: + jcol.Len = (int)strlen(bp->GetString(jvp, buf)); + break; + case TYPE_DBL: + case TYPE_FLOAT: + jcol.Len = (int)strlen(bp->GetString(jvp, buf)); + jcol.Scale = jvp->Nd; + break; + case TYPE_BOOL: + jcol.Len = 1; + break; + default: + jcol.Len = 0; + break; + } // endswitch Type + + jcol.Scale = jvp->Nd; + jcol.Cbn = jvp->Type == TYPE_NULL; + } else if (!jvp || bp->IsValueNull(jvp)) { + jcol.Type = TYPE_UNKNOWN; + jcol.Len = jcol.Scale = 0; + jcol.Cbn = true; + } else if (j < lvl) { + if (!fmt[bf]) + strcat(fmt, colname); + + p = fmt + strlen(fmt); + jsp = jvp; + + switch (jsp->Type) { + case TYPE_JOB: + job = jsp; + + for (PBPR jrp = bp->GetObject(job); jrp; jrp = bp->GetNext(jrp)) { + PCSZ k = bp->GetKey(jrp); + + if (*k != '$') { + n = sizeof(fmt) - strlen(fmt) - 1; + strncat(strncat(fmt, sep, n), k, n - strlen(sep)); + n = sizeof(colname) - strlen(colname) - 1; + strncat(strncat(colname, "_", n), k, n - 1); + } // endif Key + + if (Find(g, bp->GetVlp(jrp), k, j + 1)) + return true; + + *p = *pc = 0; + } // endfor jrp + + return false; + case TYPE_JAR: + jar = jsp; + + if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key))) + ars = MY_MIN(bp->GetArraySize(jar), limit); + else + ars = MY_MIN(bp->GetArraySize(jar), 1); + + for (int k = 0; k < ars; k++) { + n = sizeof(fmt) - (strlen(fmt) + 1); + + if (!tdp->Xcol || stricmp(tdp->Xcol, key)) { + sprintf(buf, "%d", k); + + if (tdp->Uri) { + strncat(strncat(fmt, sep, n), buf, n - strlen(sep)); + } else { + strncat(strncat(fmt, "[", n), buf, n - 1); + strncat(fmt, "]", n - (strlen(buf) + 1)); + } // endif uri + + if (all) { + n = sizeof(colname) - (strlen(colname) + 1); + strncat(strncat(colname, "_", n), buf, n - 1); + } // endif all + + } else { + strncat(fmt, (tdp->Uri ? 
sep : "[*]"), n); + } + + if (Find(g, bp->GetArrayValue(jar, k), "", j)) + return true; + + *p = *pc = 0; + } // endfor k + + return false; + default: + sprintf(g->Message, "Logical error after %s", fmt); + return true; + } // endswitch Type + + } else if (lvl >= 0) { + if (strfy) { + if (!fmt[bf]) + strcat(fmt, colname); + + strcat(fmt, ".*"); + } else if (JsonAllPath() && !fmt[bf]) + strcat(fmt, colname); + + jcol.Type = TYPE_STRG; + jcol.Len = sz; + jcol.Scale = 0; + jcol.Cbn = true; + } else + return false; + + AddColumn(g); + return false; +} // end of Find + +void BSONDISC::AddColumn(PGLOBAL g) +{ + bool b = fmt[bf] != 0; // True if formatted + + // Check whether this column was already found + for (jcp = fjcp; jcp; jcp = jcp->Next) + if (!strcmp(colname, jcp->Name)) + break; + + if (jcp) { + if (jcp->Type != jcol.Type) { + if (jcp->Type == TYPE_UNKNOWN || jcp->Type == TYPE_NULL) + jcp->Type = jcol.Type; + // else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID) + // jcp->Type = TYPE_STRING; + else if (jcp->Type != TYPE_STRG) + switch (jcol.Type) { + case TYPE_STRG: + case TYPE_DBL: + jcp->Type = jcol.Type; + break; + case TYPE_BINT: + if (jcp->Type == TYPE_INTG || jcp->Type == TYPE_BOOL) + jcp->Type = jcol.Type; + + break; + case TYPE_INTG: + if (jcp->Type == TYPE_BOOL) + jcp->Type = jcol.Type; + + break; + default: + break; + } // endswith Type + + } // endif Type + + if (b && (!jcp->Fmt || strlen(jcp->Fmt) < strlen(fmt))) { + jcp->Fmt = PlugDup(g, fmt); + length[7] = MY_MAX(length[7], strlen(fmt)); + } // endif fmt + + jcp->Len = MY_MAX(jcp->Len, jcol.Len); + jcp->Scale = MY_MAX(jcp->Scale, jcol.Scale); + jcp->Cbn |= jcol.Cbn; + jcp->Found = true; + } else if (jcol.Type != TYPE_UNKNOWN || tdp->Accept) { + // New column + jcp = (PJCL)PlugSubAlloc(g, NULL, sizeof(JCOL)); + *jcp = jcol; + jcp->Cbn |= (i > 1); + jcp->Name = PlugDup(g, colname); + length[0] = MY_MAX(length[0], strlen(colname)); + + if (b) { + jcp->Fmt = PlugDup(g, fmt); + length[7] = MY_MAX(length[7], strlen(fmt)); + } else + jcp->Fmt = NULL; + + if (pjcp) { + jcp->Next = pjcp->Next; + pjcp->Next = jcp; + } else + fjcp = jcp; + + n++; + } // endif jcp + + if (jcp) + pjcp = jcp; + +} // end of AddColumn + +/* -------------------------- Class BTUTIL --------------------------- */ + +/***********************************************************************/ +/* Find the row in the tree structure. */ +/***********************************************************************/ +PBVAL BTUTIL::FindRow(PGLOBAL g) +{ + char *p, *objpath; + PBVAL jsp = Tp->Row; + PBVAL val = NULL; + + for (objpath = PlugDup(g, Tp->Objname); jsp && objpath; objpath = p) { + if ((p = strchr(objpath, Tp->Sep))) + *p++ = 0; + + if (*objpath != '[' && !IsNum(objpath)) { // objpass is a key + val = (jsp->Type == TYPE_JOB) ? + GetKeyValue(jsp, objpath) : NULL; + } else { + if (*objpath == '[') { + if (objpath[strlen(objpath) - 1] == ']') + objpath++; + else + return NULL; + } // endif [ + + val = (jsp->Type == TYPE_JAR) ? + GetArrayValue(GetArray(jsp), atoi(objpath) - Tp->B) : NULL; + } // endif objpath + + // jsp = (val) ? val->GetJson() : NULL; + jsp = val; + } // endfor objpath + + return jsp; +} // end of FindRow + +/***********************************************************************/ +/* Parse the read line. 
*/
+/***********************************************************************/
+PBVAL BTUTIL::ParseLine(PGLOBAL g, int prty, bool cma)
+{
+  pretty = prty;
+  comma = cma;
+  return ParseJson(g, Tp->To_Line, strlen(Tp->To_Line));
+} // end of ParseLine
+
+/***********************************************************************/
+/* Make the top tree from the object path.                             */
+/***********************************************************************/
+PBVAL BTUTIL::MakeTopTree(PGLOBAL g, int type)
+{
+  PBVAL top = NULL, val = NULL;
+
+  if (Tp->Objname) {
+    if (!Tp->Row) {
+      // Parse and allocate Objpath item(s)
+      char* p;
+      char *objpath = PlugDup(g, Tp->Objname);
+      int   i;
+      PBVAL objp = NULL;
+      PBVAL arp = NULL;
+
+      for (; objpath; objpath = p) {
+        if ((p = strchr(objpath, Tp->Sep)))
+          *p++ = 0;
+
+        if (*objpath != '[' && !IsNum(objpath)) {
+          objp = NewVal(TYPE_JOB);
+
+          if (!top)
+            top = objp;
+
+          if (val)
+            SetValueObj(val, objp);
+
+          val = NewVal();
+          SetKeyValue(objp, MOF(val), objpath);
+        } else {
+          if (*objpath == '[') {
+            // Old style
+            if (objpath[strlen(objpath) - 1] != ']') {
+              sprintf(g->Message, "Invalid Table path %s", Tp->Objname);
+              return NULL;
+            } else
+              objpath++;
+
+          } // endif objpath
+
+          if (!top)
+            top = NewVal(TYPE_JAR);
+
+          if (val)
+            SetValueArr(val, arp);
+
+          val = NewVal();
+          i = atoi(objpath) - Tp->B;
+          SetArrayValue(arp, val, i);
+        } // endif objpath
+
+      } // endfor p
+
+    } // endif Row
+
+    Tp->Row = val;
+    if (Tp->Row) Tp->Row->Type = type;
+  } else
+    top = Tp->Row = NewVal(type);
+
+  return top;
+} // end of MakeTopTree
+
+PSZ BTUTIL::SerialVal(PGLOBAL g, PBVAL vlp, int pretty)
+{
+  return Serialize(g, vlp, NULL, pretty);
+} // end of SerialVal
+
+/* -------------------------- Class BCUTIL --------------------------- */
+
+/***********************************************************************/
+/* SetJsonValue: Set a value from a BVAL content.                      */
+/***********************************************************************/
+void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp)
+{
+  if (jvp) {
+    vp->SetNull(false);
+
+    if (Jb) {
+      vp->SetValue_psz(Serialize(g, jvp, NULL, 0));
+      Jb = false;
+    } else switch (jvp->Type) {
+    case TYPE_STRG:
+    case TYPE_INTG:
+    case TYPE_BINT:
+    case TYPE_DBL:
+    case TYPE_DTM:
+    case TYPE_FLOAT:
+      switch (vp->GetType()) {
+      case TYPE_STRING:
+      case TYPE_DATE:
+      case TYPE_DECIM:
+        vp->SetValue_psz(GetString(jvp));
+        break;
+      case TYPE_INT:
+      case TYPE_SHORT:
+      case TYPE_TINY:
+        vp->SetValue(GetInteger(jvp));
+        break;
+      case TYPE_BIGINT:
+        vp->SetValue(GetBigint(jvp));
+        break;
+      case TYPE_DOUBLE:
+        vp->SetValue(GetDouble(jvp));
+
+        if (jvp->Type == TYPE_DBL || jvp->Type == TYPE_FLOAT)
+          vp->SetPrec(jvp->Nd);
+
+        break;
+      default:
+        sprintf(G->Message, "Unsupported column type %d", vp->GetType());
+        throw 888;
+      } // endswitch Type
+
+      break;
+    case TYPE_BOOL:
+      if (vp->IsTypeNum())
+        vp->SetValue(GetInteger(jvp) ? 1 : 0);
+      else
+        vp->SetValue_psz((PSZ)(GetInteger(jvp) ? "true" : "false"));
+
+      break;
+    case TYPE_JAR:
+    case TYPE_JOB:
+      //  SetJsonValue(g, vp, val->GetArray()->GetValue(0));
+      vp->SetValue_psz(GetValueText(g, jvp, NULL));
+      break;
+    default:
+      vp->Reset();
+      vp->SetNull(true);
+    } // endswitch Type
+
+  } else {
+    vp->Reset();
+    vp->SetNull(true);
+  } // endif jvp
+
+} // end of SetJsonValue
+
+/***********************************************************************/
+/* MakeBson: Serialize the json item and set value to it. 
*/
+/***********************************************************************/
+PBVAL BCUTIL::MakeBson(PGLOBAL g, PBVAL jsp, int n)
+{
+  PBVAL vlp, jvp = jsp;
+
+  if (n < Cp->Nod - 1) {
+    if (jsp->Type == TYPE_JAR) {
+      int    ars = GetArraySize(jsp);
+      PJNODE jnp = &Cp->Nodes[n];
+
+      jvp = NewVal(TYPE_JAR);
+      jnp->Op = OP_EQ;
+
+      for (int i = 0; i < ars; i++) {
+        jnp->Rank = i;
+        vlp = GetRowValue(g, jsp, n);
+        AddArrayValue(jvp, DupVal(vlp));
+      } // endfor i
+
+      jnp->Op = OP_XX;
+      jnp->Rank = 0;
+    } else if (jsp->Type == TYPE_JOB) {
+      jvp = NewVal(TYPE_JOB);
+
+      for (PBPR prp = GetObject(jsp); prp; prp = GetNext(prp)) {
+        vlp = GetRowValue(g, GetVlp(prp), n + 1);
+        SetKeyValue(jvp, vlp, MZP(prp->Key));
+      } // endfor prp
+
+    } // endif Type
+
+  } // endif's
+
+  Jb = true;
+  return jvp;
+} // end of MakeBson
+
+/***********************************************************************/
+/* GetRowValue:                                                        */
+/***********************************************************************/
+PBVAL BCUTIL::GetRowValue(PGLOBAL g, PBVAL row, int i)
+{
+  int    nod = Cp->Nod, n = nod - 1;
+  JNODE *nodes = Cp->Nodes;
+  PBVAL  arp;
+  PBVAL  bvp = NULL;
+
+  for (; i < nod && row; i++) {
+    if (nodes[i].Op == OP_NUM) {
+      bvp = NewVal(TYPE_INT);
+      bvp->N = (row->Type == TYPE_JAR) ? GetSize(row) : 1;
+      return(bvp);
+    } else if (nodes[i].Op == OP_XX) {
+      return MakeBson(g, row, i);
+    } else switch (row->Type) {
+    case TYPE_JOB:
+      if (!nodes[i].Key) {
+        // Expected Array was not there, wrap the value
+        if (i < nod - 1)
+          continue;
+        else
+          bvp = row;
+
+      } else
+        bvp = GetKeyValue(row, nodes[i].Key);
+
+      break;
+    case TYPE_JAR:
+      arp = row;
+
+      if (!nodes[i].Key) {
+        if (nodes[i].Op == OP_EQ)
+          bvp = GetArrayValue(arp, nodes[i].Rank);
+        else if (nodes[i].Op == OP_EXP)
+          return NewVal(ExpandArray(g, arp, i));
+        else
+          return NewVal(CalculateArray(g, arp, i));
+
+      } else {
+        // Unexpected array, unwrap it as [0]
+        bvp = GetArrayValue(arp, 0);
+        i--;
+      } // endif's
+
+      break;
+    case TYPE_JVAL:
+      bvp = row;
+      break;
+    default:
+      sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+      bvp = NULL;
+    } // endswitch Type
+
+    if (i < nod - 1)
+      row = bvp;
+
+  } // endfor i
+
+  return bvp;
+} // end of GetRowValue
+
+/***********************************************************************/
+/* GetColumnValue:                                                     */
+/***********************************************************************/
+PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i)
+{
+  PVAL  value = Cp->Value;
+  PBVAL bvp = GetRowValue(g, row, i);
+
+  SetJsonValue(g, value, bvp);
+  return value;
+} // end of GetColumnValue
+
+/***********************************************************************/
+/* ExpandArray:                                                        */
+/***********************************************************************/
+PVAL BCUTIL::ExpandArray(PGLOBAL g, PBVAL arp, int n)
+{
+  int    nod = Cp->Nod, ars = MY_MIN(Tp->Limit, GetArraySize(arp));
+  JNODE *nodes = Cp->Nodes;
+  PVAL   value = Cp->Value;
+  PBVAL  bvp;
+  BVAL   bval;
+
+  if (!ars) {
+    value->Reset();
+    value->SetNull(true);
+    Tp->NextSame = 0;
+    return value;
+  } // endif ars
+
+  if (!(bvp = GetArrayValue(arp, (nodes[n].Rx = nodes[n].Nx)))) {
+    strcpy(g->Message, "Logical error expanding array");
+    throw 666;
+  } // endif bvp
+
+  if (n < nod - 1 && IsJson(bvp)) {
+    SetValue(&bval, GetColumnValue(g, bvp, n + 1));
+    bvp = &bval;
+  } // endif n
+
+  if (n >= Tp->NextSame) {
+    if (++nodes[n].Nx == ars) {
+      nodes[n].Nx = 0;
+      Cp->Xnod = 0;
+    } else
+      Cp->Xnod = n;
+
+    Tp->NextSame = Cp->Xnod;
+  } // endif NextSame
+
+  SetJsonValue(g, value, bvp);
+ 
return value;
+} // end of ExpandArray
+
+/***********************************************************************/
+/* CalculateArray:                                                     */
+/***********************************************************************/
+PVAL BCUTIL::CalculateArray(PGLOBAL g, PBVAL arp, int n)
+{
+  int     i, ars, nv = 0, nextsame = Tp->NextSame;
+  bool    err;
+  int     nod = Cp->Nod;
+  JNODE  *nodes = Cp->Nodes;
+  OPVAL   op = nodes[n].Op;
+  PVAL    val[2], vp = nodes[n].Valp, mulval = Cp->MulVal;
+  PBVAL   jvrp, jvp;
+  BVAL    jval;
+
+  vp->Reset();
+  ars = MY_MIN(Tp->Limit, GetArraySize(arp));
+  xtrc(1, "CalculateArray: size=%d op=%d nextsame=%d\n", ars, op, nextsame);
+
+  for (i = 0; i < ars; i++) {
+    jvrp = GetArrayValue(arp, i);
+    xtrc(1, "i=%d nv=%d\n", i, nv);
+
+    if (!IsValueNull(jvrp) || (op == OP_CNC && GetJsonNull())) do {
+      if (IsValueNull(jvrp)) {
+        SetString(jvrp, PlugDup(G, GetJsonNull()));
+        jvp = jvrp;
+      } else if (n < nod - 1 && IsJson(jvrp)) {
+        Tp->NextSame = nextsame;
+        SetValue(&jval, GetColumnValue(g, jvrp, n + 1));
+        jvp = &jval;
+      } else
+        jvp = jvrp;
+
+      xtrc(1, "jvp=%s null=%d\n", GetString(jvp), IsValueNull(jvp) ? 1 : 0);
+
+      if (!nv++) {
+        SetJsonValue(g, vp, jvp);
+        continue;
+      } else
+        SetJsonValue(g, mulval, jvp);
+
+      if (!mulval->IsNull()) {
+        switch (op) {
+        case OP_CNC:
+          if (nodes[n].CncVal) {
+            val[0] = nodes[n].CncVal;
+            err = vp->Compute(g, val, 1, op);
+          } // endif CncVal
+
+          val[0] = mulval;
+          err = vp->Compute(g, val, 1, op);
+          break;
+        //case OP_NUM:
+        case OP_SEP:
+          val[0] = nodes[n].Valp;
+          val[1] = mulval;
+          err = vp->Compute(g, val, 2, OP_ADD);
+          break;
+        default:
+          val[0] = nodes[n].Valp;
+          val[1] = mulval;
+          err = vp->Compute(g, val, 2, op);
+        } // endswitch Op
+
+        if (err)
+          vp->Reset();
+
+        if (trace(1)) {
+          char buf[32];
+
+          htrc("vp='%s' err=%d\n",
+            vp->GetCharString(buf), err ? 1 : 0);
+
+        } // endif trace
+
+      } // endif Null
+
+    } while (Tp->NextSame > nextsame);
+
+  } // endfor i
+
+  if (op == OP_SEP) {
+    // Calculate average
+    mulval->SetValue(nv);
+    val[0] = vp;
+    val[1] = mulval;
+
+    if (vp->Compute(g, val, 2, OP_DIV))
+      vp->Reset();
+
+  } // endif Op
+
+  Tp->NextSame = nextsame;
+  return vp;
+} // end of CalculateArray
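An editorial sketch (not part of the patch) of the column specifications that drive CalculateArray: the operator characters are mapped to OPVAL codes by SetArrayOptions further down, so [+] sums the array (OP_ADD), [!] averages it (OP_SEP followed by the division above), [#] counts its items (OP_NUM) and a quoted string concatenates them (OP_CNC with an intermediate separator). The BSON table type name and the file below are assumptions for illustration only:

# Each JPATH below is evaluated over the scores array of a row.
CREATE TABLE jscores (
name CHAR(12) JPATH='$.name',
total DOUBLE(8,2) JPATH='$.scores[+]',
avg DOUBLE(8,2) JPATH='$.scores[!]',
nb INT(3) JPATH='$.scores[#]',
list CHAR(64) JPATH='$.scores["; "]'
) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='scores.json';

+
+/***********************************************************************/
+/* GetRow: Get the object containing this column. 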
*/ +/***********************************************************************/ +PBVAL BCUTIL::GetRow(PGLOBAL g) +{ + int nod = Cp->Nod; + JNODE *nodes = Cp->Nodes; + PBVAL val = NULL; + PBVAL arp; + PBVAL nwr, row = Tp->Row; + + for (int i = 0; i < nod && row; i++) { + if (i < nod-1 && nodes[i+1].Op == OP_XX) + break; + else switch (row->Type) { + case TYPE_JOB: + if (!nodes[i].Key) + // Expected Array was not there, wrap the value + continue; + + val = GetKeyValue(row, nodes[i].Key); + break; + case TYPE_JAR: + arp = row; + + if (!nodes[i].Key) { + if (nodes[i].Op == OP_EQ) + val = GetArrayValue(arp, nodes[i].Rank); + else + val = GetArrayValue(arp, nodes[i].Rx); + + } else { + // Unexpected array, unwrap it as [0] + val = GetArrayValue(arp, 0); + i--; + } // endif Nodes + + break; + case TYPE_JVAL: + val = row; + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + val = NULL; + } // endswitch Type + + if (val) { + row = val; + } else { + // Construct missing objects + for (i++; row && i < nod; i++) { + int type; + + if (nodes[i].Op == OP_XX) + break; + else if (!nodes[i].Key) + // Construct intermediate array + type = TYPE_JAR; + else + type = TYPE_JOB; + + if (row->Type == TYPE_JOB) { + nwr = AddPair(row, nodes[i - 1].Key, type); + } else if (row->Type == TYPE_JAR) { + AddArrayValue(row, (nwr = NewVal(type))); + } else { + strcpy(g->Message, "Wrong type when writing new row"); + nwr = NULL; + } // endif's + + row = nwr; + } // endfor i + + break; + } // endelse + + } // endfor i + + return row; +} // end of GetRow + + +/* -------------------------- Class BSONDEF -------------------------- */ + +BSONDEF::BSONDEF(void) +{ + Jmode = MODE_OBJECT; + Objname = NULL; + Xcol = NULL; + Pretty = 2; + Limit = 1; + Base = 0; + Strict = false; + Sep = '.'; + Uri = NULL; + Collname = Options = Filter = NULL; + Pipe = false; + Driver = NULL; + Version = 0; + Wrapname = NULL; +} // end of BSONDEF constructor + +/***********************************************************************/ +/* DefineAM: define specific AM block values. */ +/***********************************************************************/ +bool BSONDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) +{ + G = g; + Schema = GetStringCatInfo(g, "DBname", Schema); + Jmode = (JMODE)GetIntCatInfo("Jmode", MODE_OBJECT); + Objname = GetStringCatInfo(g, "Object", NULL); + Xcol = GetStringCatInfo(g, "Expand", NULL); + Pretty = GetIntCatInfo("Pretty", 2); + Limit = GetIntCatInfo("Limit", 10); + Base = GetIntCatInfo("Base", 0) ? 1 : 0; + Sep = *GetStringCatInfo(g, "Separator", "."); + Accept = GetBoolCatInfo("Accept", false); + + // Don't use url as MONGO uri when called from REST + if (stricmp(am, "REST") && (Uri = GetStringCatInfo(g, "Connect", NULL))) { +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) + Collname = GetStringCatInfo(g, "Name", + (Catfunc & (FNC_TABLE | FNC_COL)) ? 
NULL : Name); + Collname = GetStringCatInfo(g, "Tabname", Collname); + Options = GetStringCatInfo(g, "Colist", NULL); + Filter = GetStringCatInfo(g, "Filter", NULL); + Pipe = GetBoolCatInfo("Pipeline", false); + Driver = GetStringCatInfo(g, "Driver", NULL); + Version = GetIntCatInfo("Version", 3); + Pretty = 0; +#if defined(JAVA_SUPPORT) + if (Version == 2) + Wrapname = GetStringCatInfo(g, "Wrapper", "Mongo2Interface"); + else + Wrapname = GetStringCatInfo(g, "Wrapper", "Mongo3Interface"); +#endif // JAVA_SUPPORT +#else // !MONGO_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); + return true; +#endif // !MONGO_SUPPORT + } // endif Uri + + return DOSDEF::DefineAM(g, (Uri ? "XMGO" : "DOS"), poff); +} // end of DefineAM + +/***********************************************************************/ +/* GetTable: makes a new Table Description Block. */ +/***********************************************************************/ +PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) +{ + if (trace(1)) + htrc("BSON GetTable Pretty=%d Uri=%s\n", Pretty, SVP(Uri)); + + if (Catfunc == FNC_COL) + return new(g)TDBBCL(this); + + PTDBASE tdbp; + PTXF txfp = NULL; + + // JSN not used for pretty=1 for insert or delete + if (Pretty <= 0 || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) { + USETEMP tmp = UseTemp(); + bool map = Mapped && Pretty >= 0 && m != MODE_INSERT && + !(tmp != TMP_NO && m == MODE_UPDATE) && + !(tmp == TMP_FORCE && (m == MODE_UPDATE || m == MODE_DELETE)); + + if (Lrecl) { + // Allocate the parse work memory + G = PlugInit(NULL, (size_t)Lrecl * (Pretty < 0 ? 2 : 4)); + } else { + strcpy(g->Message, "LRECL is not defined"); + return NULL; + } // endif Lrecl + + if (Pretty < 0) { // BJsonfile + txfp = new(g) BINFAM(this); + } else if (Uri) { + if (Driver && toupper(*Driver) == 'C') { +#if defined(CMGO_SUPPORT) + txfp = new(g) CMGFAM(this); +#else + sprintf(g->Message, "Mongo %s Driver not available", "C"); + return NULL; +#endif + } else if (Driver && toupper(*Driver) == 'J') { +#if defined(JAVA_SUPPORT) + txfp = new(g) JMGFAM(this); +#else + sprintf(g->Message, "Mongo %s Driver not available", "Java"); + return NULL; +#endif + } else { // Driver not specified +#if defined(CMGO_SUPPORT) + txfp = new(g) CMGFAM(this); +#elif defined(JAVA_SUPPORT) + txfp = new(g) JMGFAM(this); +#else // !MONGO_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); + return NULL; +#endif // !MONGO_SUPPORT + } // endif Driver + + } else if (Zipped) { +#if defined(ZIP_SUPPORT) + if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { + txfp = new(g) UNZFAM(this); + } else if (m == MODE_INSERT) { + txfp = new(g) ZIPFAM(this); + } else { + strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + return NULL; + } // endif's m +#else // !ZIP_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return NULL; +#endif // !ZIP_SUPPORT + } else if (Compressed) { +#if defined(GZ_SUPPORT) + if (Compressed == 1) + txfp = new(g) GZFAM(this); + else + txfp = new(g) ZLBFAM(this); +#else // !GZ_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ"); + return NULL; +#endif // !GZ_SUPPORT + } else if (map) { + txfp = new(g) MAPFAM(this); + } else + txfp = new(g) DOSFAM(this); + + // Txfp must be set for TDBBSN + tdbp = new(g) TDBBSN(g, this, txfp); + } else { + if (Zipped) { +#if defined(ZIP_SUPPORT) + if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { + txfp = new(g) UNZFAM(this); + } else if (m == MODE_INSERT) { + strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0"); + 
return NULL; + } else { + strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + return NULL; + } // endif's m +#else // !ZIP_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return NULL; +#endif // !ZIP_SUPPORT + } else + txfp = new(g) MAPFAM(this); + + tdbp = new(g) TDBBSON(g, this, txfp); + } // endif Pretty + + if (Multiple) + tdbp = new(g) TDBMUL(tdbp); + + return tdbp; +} // end of GetTable + +/* --------------------------- Class TDBBSN -------------------------- */ + +/***********************************************************************/ +/* Implementation of the TDBBSN class (Pretty < 2) */ +/***********************************************************************/ +TDBBSN::TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) +{ + Bp = new(g) BTUTIL(tdp->G, this); + Top = NULL; + Row = NULL; + Colp = NULL; + + if (tdp) { + Jmode = tdp->Jmode; + Objname = tdp->Objname; + Xcol = tdp->Xcol; + Limit = tdp->Limit; + Pretty = tdp->Pretty; + B = tdp->Base ? 1 : 0; + Sep = tdp->Sep; + Strict = tdp->Strict; + } else { + Jmode = MODE_OBJECT; + Objname = NULL; + Xcol = NULL; + Limit = 1; + Pretty = 0; + B = 0; + Sep = '.'; + Strict = false; + } // endif tdp + + Fpos = -1; + N = M = 0; + NextSame = 0; + SameRow = 0; + Xval = -1; + Comma = false; + Bp->SetPretty(Pretty); +} // end of TDBBSN standard constructor + +TDBBSN::TDBBSN(TDBBSN* tdbp) : TDBDOS(NULL, tdbp) +{ + Bp = tdbp->Bp; + Top = tdbp->Top; + Row = tdbp->Row; + Colp = tdbp->Colp; + Jmode = tdbp->Jmode; + Objname = tdbp->Objname; + Xcol = tdbp->Xcol; + Fpos = tdbp->Fpos; + N = tdbp->N; + M = tdbp->M; + Limit = tdbp->Limit; + NextSame = tdbp->NextSame; + SameRow = tdbp->SameRow; + Xval = tdbp->Xval; + B = tdbp->B; + Sep = tdbp->Sep; + Pretty = tdbp->Pretty; + Strict = tdbp->Strict; + Comma = tdbp->Comma; +} // end of TDBBSN copy constructor + +// Used for update +PTDB TDBBSN::Clone(PTABS t) +{ + PTDB tp; + PBSCOL cp1, cp2; + PGLOBAL g = t->G; + + tp = new(g) TDBBSN(this); + + for (cp1 = (PBSCOL)Columns; cp1; cp1 = (PBSCOL)cp1->GetNext()) { + cp2 = new(g) BSONCOL(cp1, tp); // Make a copy + NewPointer(t, cp1, cp2); + } // endfor cp1 + + return tp; +} // end of Clone + +/***********************************************************************/ +/* Allocate JSN column description block. */ +/***********************************************************************/ +PCOL TDBBSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) +{ + PBSCOL colp = new(g) BSONCOL(g, cdp, this, cprec, n); + + return (colp->ParseJpath(g)) ? NULL : colp; +} // end of MakeCol + +/***********************************************************************/ +/* InsertSpecialColumn: Put a special column ahead of the column list.*/ +/***********************************************************************/ +PCOL TDBBSN::InsertSpecialColumn(PCOL colp) +{ + if (!colp->IsSpecial()) + return NULL; + + //if (Xcol && ((SPCBLK*)colp)->GetRnm()) + // colp->SetKey(0); // Rownum is no more a key + + colp->SetNext(Columns); + Columns = colp; + return colp; +} // end of InsertSpecialColumn + +/***********************************************************************/ +/* JSON Cardinality: returns table size in number of rows. 
*/ +/***********************************************************************/ +int TDBBSN::Cardinality(PGLOBAL g) +{ + if (!g) + return 0; + else if (Cardinal < 0) { + Cardinal = TDBDOS::Cardinality(g); + + } // endif Cardinal + + return Cardinal; +} // end of Cardinality + +/***********************************************************************/ +/* JSON GetMaxSize: returns file size estimate in number of lines. */ +/***********************************************************************/ +int TDBBSN::GetMaxSize(PGLOBAL g) +{ + if (MaxSize < 0) + MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1); + + return MaxSize; +} // end of GetMaxSize + +/***********************************************************************/ +/* JSON EstimatedLength. Returns an estimated minimum line length. */ +/***********************************************************************/ +int TDBBSN::EstimatedLength(void) +{ + if (AvgLen <= 0) + return (Lrecl ? Lrecl : 1024) / 8; // TODO: make it better + else + return AvgLen; + +} // end of Estimated Length + +/***********************************************************************/ +/* OpenDB: Data Base open routine for JSN access method. */ +/***********************************************************************/ +bool TDBBSN::OpenDB(PGLOBAL g) +{ + TUSE use = Use; + + if (Use == USE_OPEN) { + /*******************************************************************/ + /* Table already open replace it at its beginning. ??? */ + /*******************************************************************/ + Fpos = -1; + NextSame = 0; + SameRow = 0; + } // endif Use + + /*********************************************************************/ + /* Open according to logical input/output mode required. */ + /*********************************************************************/ + if (TDBDOS::OpenDB(g)) + return true; + + if (use == USE_OPEN) + return false; + + if (Pretty < 0) { + /*********************************************************************/ + /* Binary BJSON table. */ + /*********************************************************************/ + xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", + this, Tdb_No, Use, Mode); + + // Lrecl is Ok + size_t linelen = Lrecl; + MODE mode = Mode; + + // Buffer must be allocated in G->Sarea + Mode = MODE_ANY; + Txfp->AllocateBuffer(Bp->G); + Mode = mode; + + if (Mode == MODE_INSERT) + Bp->SubSet(true); + else + Bp->MemSave(); + + To_Line = Txfp->GetBuf(); + memset(To_Line, 0, linelen); + xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); + } // endif Pretty + + /***********************************************************************/ + /* First opening. */ + /***********************************************************************/ + if (Mode == MODE_INSERT) { + int type; + + switch (Jmode) { + case MODE_OBJECT: type = TYPE_JOB; break; + case MODE_ARRAY: type = TYPE_JAR; break; + case MODE_VALUE: type = TYPE_JVAL; break; + default: + sprintf(g->Message, "Invalid Jmode %d", Jmode); + return true; + } // endswitch Jmode + + Top = Bp->MakeTopTree(g, type); + Bp->MemSave(); + } // endif Mode + + if (Xcol) + To_Filter = NULL; // Not compatible + + return false; +} // end of OpenDB + +/***********************************************************************/ +/* SkipHeader: Physically skip first header line if applicable. */ +/* This is called from TDBDOS::OpenDB and must be executed before */ +/* Kindex construction if the file is accessed using an index. 
*/ +/***********************************************************************/ +bool TDBBSN::SkipHeader(PGLOBAL g) +{ + int len = GetFileLength(g); + bool rc = false; + +#if defined(_DEBUG) + if (len < 0) + return true; +#endif // _DEBUG + + if (Pretty == 1) { + if (Mode == MODE_INSERT || Mode == MODE_DELETE) { + // Mode Insert and delete are no more handled here + DBUG_ASSERT(false); + } else if (len > 0) // !Insert && !Delete + rc = (Txfp->SkipRecord(g, false) == RC_FX || Txfp->RecordPos(g)); + + } // endif Pretty + + return rc; +} // end of SkipHeader + +/***********************************************************************/ +/* ReadDB: Data Base read routine for JSN access method. */ +/***********************************************************************/ +int TDBBSN::ReadDB(PGLOBAL g) +{ + int rc; + + N++; + + if (NextSame) { + SameRow = NextSame; + NextSame = 0; + M++; + return RC_OK; + } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) { + if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK)) + return rc; // Deferred reading failed + + if (Pretty >= 0) { + // Recover the memory used for parsing + Bp->SubSet(); + + if ((Row = Bp->ParseLine(g, Pretty, Comma))) { + Top = Row; + Row = Bp->FindRow(g); + SameRow = 0; + Fpos++; + M = 1; + rc = RC_OK; + } else if (Pretty != 1 || strcmp(To_Line, "]")) { + Bp->GetMsg(g); + rc = RC_FX; + } else + rc = RC_EF; + + } else { // Here we get a movable Json binary tree + Bp->MemSet(((BINFAM*)Txfp)->Recsize); // Useful when updating + Row = Top = (PBVAL)To_Line; + Row = Bp->FindRow(g); + SameRow = 0; + Fpos++; + M = 1; + rc = RC_OK; + } // endif Pretty + + } // endif ReadDB + + return rc; +} // end of ReadDB + +/***********************************************************************/ +/* PrepareWriting: Prepare the line for WriteDB. */ +/***********************************************************************/ +bool TDBBSN::PrepareWriting(PGLOBAL g) +{ + if (Pretty >= 0) { + PSZ s; + +// if (!(Top = Bp->MakeTopTree(g, Row->Type))) +// return true; + + if ((s = Bp->SerialVal(g, Top, Pretty))) { + if (Comma) + strcat(s, ","); + + if ((signed)strlen(s) > Lrecl) { + strncpy(To_Line, s, Lrecl); + sprintf(g->Message, "Line truncated (lrecl=%d)", Lrecl); + return PushWarning(g, this); + } else + strcpy(To_Line, s); + + return false; + } else + return true; + } else + ((BINFAM*)Txfp)->Recsize = ((size_t)PlugSubAlloc(Bp->G, NULL, 0) + - (size_t)To_Line); + return false; +} // end of PrepareWriting + +/***********************************************************************/ +/* WriteDB: Data Base write routine for JSON access method. */ +/***********************************************************************/ +int TDBBSN::WriteDB(PGLOBAL g) { + int rc = TDBDOS::WriteDB(g); + + Bp->SubSet(); + Bp->Clear(Row); + return rc; +} // end of WriteDB + +/***********************************************************************/ +/* Data Base close routine for JSON access method. */ +/***********************************************************************/ +void TDBBSN::CloseDB(PGLOBAL g) +{ + TDBDOS::CloseDB(g); + Bp->G = PlugExit(Bp->G); +} // end of CloseDB + +/* ---------------------------- BSONCOL ------------------------------ */ + +/***********************************************************************/ +/* BSONCOL public constructor. */ +/***********************************************************************/ +BSONCOL::BSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i) + : DOSCOL(g, cdp, tdbp, cprec, i, "DOS") +{ + Tbp = (TDBBSN*)(tdbp->GetOrig() ? 
tdbp->GetOrig() : tdbp);
+  Cp = new(g) BCUTIL(((PBDEF)Tbp->To_Def)->G, this, Tbp);
+  Jpath = cdp->GetFmt();
+  MulVal = NULL;
+  Nodes = NULL;
+  Nod = 0;
+  Sep = Tbp->Sep;
+  Xnod = -1;
+  Xpd = false;
+  Parsed = false;
+  Warned = false;
+} // end of BSONCOL constructor
+
+/***********************************************************************/
+/* BSONCOL constructor used for copying columns.                       */
+/* tdbp is the pointer to the new table descriptor.                    */
+/***********************************************************************/
+BSONCOL::BSONCOL(BSONCOL* col1, PTDB tdbp) : DOSCOL(col1, tdbp)
+{
+  Tbp = col1->Tbp;
+  Cp = col1->Cp;
+  Jpath = col1->Jpath;
+  MulVal = col1->MulVal;
+  Nodes = col1->Nodes;
+  Nod = col1->Nod;
+  Sep = col1->Sep;
+  Xnod = col1->Xnod;
+  Xpd = col1->Xpd;
+  Parsed = col1->Parsed;
+  Warned = col1->Warned;
+} // end of BSONCOL copy constructor
+
+/***********************************************************************/
+/* SetBuffer: prepare a column block for write operation.              */
+/***********************************************************************/
+bool BSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
+{
+  if (DOSCOL::SetBuffer(g, value, ok, check))
+    return true;
+
+  // Parse the json path
+  if (ParseJpath(g))
+    return true;
+
+  Tbp = (TDBBSN*)To_Tdb;
+  return false;
+} // end of SetBuffer
+
+/***********************************************************************/
+/* Check whether this column is expanded.                              */
+/***********************************************************************/
+bool BSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
+{
+  if ((Tbp->Xcol && nm && !strcmp(nm, Tbp->Xcol) &&
+      (Tbp->Xval < 0 || Tbp->Xval == i)) || Xpd) {
+    Xpd = true;              // Expandable object
+    Nodes[i].Op = OP_EXP;
+  } else if (b) {
+    strcpy(g->Message, "Cannot expand more than one branch");
+    return true;
+  } // endif Xcol
+
+  return false;
+} // end of CheckExpand
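An editorial sketch (not part of the patch) of the rule CheckExpand enforces: a single branch may be expanded, through either the [*] specification or the Expand table option, and every element of that array then yields its own row; a second expanded branch raises the error above. Table, file and column names are assumptions for illustration only:

# Both columns follow the same expanded branch, which is accepted;
# expanding a second, different array in this table would fail.
CREATE TABLE jbios (
name CHAR(12) JPATH='$.name.last',
award CHAR(32) JPATH='$.awards[*].award',
year CHAR(4) JPATH='$.awards[*].year'
) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bios.json';

+
+/***********************************************************************/
+/* Analyse array processing options. 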
*/ +/***********************************************************************/ +bool BSONCOL::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) +{ + int n; + bool dg = true, b = false; + PJNODE jnp = &Nodes[i]; + + //if (*p == '[') p++; // Old syntax .[ or :[ + n = (int)strlen(p); + + if (*p) { + if (p[n - 1] == ']') { + p[--n] = 0; + } else if (!IsNum(p)) { + // Wrong array specification + sprintf(g->Message, "Invalid array specification %s for %s", p, Name); + return true; + } // endif p + + } else + b = true; + + // To check whether a numeric Rank was specified + dg = IsNum(p); + + if (!n) { + // Default specifications + if (CheckExpand(g, i, nm, false)) + return true; + else if (jnp->Op != OP_EXP) { + if (b) { + // Return 1st value (B is the index base) + jnp->Rank = Tbp->B; + jnp->Op = OP_EQ; + } else if (!Value->IsTypeNum()) { + jnp->CncVal = AllocateValue(g, (void*)", ", TYPE_STRING); + jnp->Op = OP_CNC; + } else + jnp->Op = OP_ADD; + + } // endif OP + + } else if (dg) { + // Return nth value + jnp->Rank = atoi(p) - Tbp->B; + jnp->Op = OP_EQ; + } else if (n == 1) { + // Set the Op value; + if (Sep == ':') + switch (*p) { + case '*': *p = 'x'; break; + case 'x': + case 'X': *p = '*'; break; // Expand this array + default: break; + } // endswitch p + + switch (*p) { + case '+': jnp->Op = OP_ADD; break; + case 'x': jnp->Op = OP_MULT; break; + case '>': jnp->Op = OP_MAX; break; + case '<': jnp->Op = OP_MIN; break; + case '!': jnp->Op = OP_SEP; break; // Average + case '#': jnp->Op = OP_NUM; break; + case '*': // Expand this array + if (!Tbp->Xcol && nm) { + Xpd = true; + jnp->Op = OP_EXP; + Tbp->Xval = i; + Tbp->Xcol = nm; + } else if (CheckExpand(g, i, nm, true)) + return true; + + break; + default: + sprintf(g->Message, + "Invalid function specification %c for %s", *p, Name); + return true; + } // endswitch *p + + } else if (*p == '"' && p[n - 1] == '"') { + // This is a concat specification + jnp->Op = OP_CNC; + + if (n > 2) { + // Set concat intermediate string + p[n - 1] = 0; + jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING); + } // endif n + + } else { + sprintf(g->Message, "Wrong array specification for %s", Name); + return true; + } // endif's + + // For calculated arrays, a local Value must be used + switch (jnp->Op) { + case OP_NUM: + jnp->Valp = AllocateValue(g, TYPE_INT); + break; + case OP_ADD: + case OP_MULT: + case OP_SEP: + if (!IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2); + + break; + case OP_MIN: + case OP_MAX: + jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision()); + break; + case OP_CNC: + if (IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_STRING, 512); + + break; + default: + break; + } // endswitch Op + + if (jnp->Valp) + MulVal = AllocateValue(g, jnp->Valp); + + return false; +} // end of SetArrayOptions + +/***********************************************************************/ +/* Parse the eventual passed Jpath information. */ +/* This information can be specified in the Fieldfmt column option */ +/* when creating the table. It permits to indicate the position of */ +/* the node corresponding to that column. 
*/ +/***********************************************************************/ +bool BSONCOL::ParseJpath(PGLOBAL g) +{ + char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL; + int i; + bool a; + + if (Parsed) + return false; // Already done + else if (InitValue(g)) + return true; + else if (!Jpath) + Jpath = Name; + + if (To_Tdb->GetOrig()) { + // This is an updated column, get nodes from origin + for (PBSCOL colp = (PBSCOL)Tbp->GetColumns(); colp; + colp = (PBSCOL)colp->GetNext()) + if (!stricmp(Name, colp->GetName())) { + Nod = colp->Nod; + Nodes = colp->Nodes; + Xpd = colp->Xpd; + goto fin; + } // endif Name + + sprintf(g->Message, "Cannot parse updated column %s", Name); + return true; + } // endif To_Orig + + pbuf = PlugDup(g, Jpath); + if (*pbuf == '$') pbuf++; + if (*pbuf == Sep) pbuf++; + if (*pbuf == '[') p1 = pbuf++; + + // Estimate the required number of nodes + for (i = 0, p = pbuf; (p = NextChr(p, Sep)); i++, p++) + Nod++; // One path node found + + Nodes = (PJNODE)PlugSubAlloc(g, NULL, (++Nod) * sizeof(JNODE)); + memset(Nodes, 0, (Nod) * sizeof(JNODE)); + + // Analyze the Jpath for this column + for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) { + a = (p1 != NULL); + p1 = strchr(p, '['); + p2 = strchr(p, Sep); + + if (!p2) + p2 = p1; + else if (p1) { + if (p1 < p2) + p2 = p1; + else if (p1 == p2 + 1) + *p2++ = 0; // Old syntax .[ or :[ + else + p1 = NULL; + + } // endif p1 + + if (p2) + *p2++ = 0; + + // Jpath must be explicit + if (a || *p == 0 || *p == '[' || IsNum(p)) { + // Analyse intermediate array processing + if (SetArrayOptions(g, p, i, Nodes[i - 1].Key)) + return true; + + } else if (*p == '*') { + // Return JSON + Nodes[i].Op = OP_XX; + } else { + Nodes[i].Key = p; + Nodes[i].Op = OP_EXIST; + } // endif's + + } // endfor i, p + + Nod = i; + +fin: + MulVal = AllocateValue(g, Value); + Parsed = true; + return false; +} // end of ParseJpath + +/***********************************************************************/ +/* Get Jpath converted to Mongo path. */ +/***********************************************************************/ +PSZ BSONCOL::GetJpath(PGLOBAL g, bool proj) +{ + if (Jpath) { + char* p1, * p2, * mgopath; + int i = 0; + + if (strcmp(Jpath, "*")) { + p1 = Jpath; + if (*p1 == '$') p1++; + if (*p1 == '.') p1++; + mgopath = PlugDup(g, p1); + } else + return NULL; + + for (p1 = p2 = mgopath; *p1; p1++) + if (i) { // Inside [] + if (isdigit(*p1)) { + if (!proj) + *p2++ = *p1; + + } else if (*p1 == ']' && i == 1) { + if (proj && p1[1] == '.') + p1++; + + i = 0; + } else if (*p1 == '.' && i == 2) { + if (!proj) + *p2++ = '.'; + + i = 0; + } else if (!proj) + return NULL; + + } else switch (*p1) { + case ':': + case '.': + if (isdigit(p1[1])) + i = 2; + + *p2++ = '.'; + break; + case '[': + if (*(p2 - 1) != '.') + *p2++ = '.'; + + i = 1; + break; + case '*': + if (*(p2 - 1) == '.' 
&& !*(p1 + 1)) { + p2--; // Suppress last :* + break; + } // endif p2 + + default: + *p2++ = *p1; + break; + } // endswitch p1; + + *p2 = 0; + return mgopath; + } else + return NULL; + +} // end of GetJpath + +/***********************************************************************/ +/* ReadColumn: */ +/***********************************************************************/ +void BSONCOL::ReadColumn(PGLOBAL g) +{ + if (!Tbp->SameRow || Xnod >= Tbp->SameRow) + Value->SetValue_pval(Cp->GetColumnValue(g, Tbp->Row, 0)); + +#if defined(DEVELOPMENT) + if (Xpd && Value->IsNull() && !((PBDEF)Tbp->To_Def)->Accept) + htrc("Null expandable JSON value for column %s\n", Name); +#endif // DEVELOPMENT + + // Set null when applicable + if (!Nullable) + Value->SetNull(false); + +} // end of ReadColumn + +/***********************************************************************/ +/* WriteColumn: */ +/***********************************************************************/ +void BSONCOL::WriteColumn(PGLOBAL g) +{ + if (Xpd && Tbp->Pretty < 2) { + strcpy(g->Message, "Cannot write expanded column when Pretty is not 2"); + throw 666; + } // endif Xpd + + /*********************************************************************/ + /* Check whether this node must be written. */ + /*********************************************************************/ + if (Value != To_Val) + Value->SetValue_pval(To_Val, FALSE); // Convert the updated value + + /*********************************************************************/ + /* On INSERT Null values are represented by no node. */ + /*********************************************************************/ + if (Value->IsNull() && Tbp->Mode == MODE_INSERT) + return; + + PBVAL jsp, row = Cp->GetRow(g); + + if (row) switch (Buf_Type) { + case TYPE_STRING: + case TYPE_DATE: + case TYPE_INT: + case TYPE_TINY: + case TYPE_SHORT: + case TYPE_BIGINT: + case TYPE_DOUBLE: + if (Buf_Type == TYPE_STRING && Nodes[Nod - 1].Op == OP_XX) { + char *s = Value->GetCharValue(); + + if (!(jsp = Cp->ParseJson(g, s, strlen(s)))) { + strcpy(g->Message, s); + throw 666; + } // endif jsp + + switch (row->Type) { + case TYPE_JAR: + if (Nod > 1 && Nodes[Nod - 2].Op == OP_EQ) + Cp->SetArrayValue(row, jsp, Nodes[Nod - 2].Rank); + else + Cp->AddArrayValue(row, jsp); + + break; + case TYPE_JOB: + if (Nod > 1 && Nodes[Nod - 2].Key) + Cp->SetKeyValue(row, jsp, Nodes[Nod - 2].Key); + + break; + case TYPE_JVAL: + default: + Cp->SetValueVal(row, jsp); + } // endswitch Type + + break; + } else + jsp = Cp->NewVal(Value); + + switch (row->Type) { + case TYPE_JAR: + if (Nodes[Nod - 1].Op == OP_EQ) + Cp->SetArrayValue(row, jsp, Nodes[Nod - 1].Rank); + else + Cp->AddArrayValue(row, jsp); + + break; + case TYPE_JOB: + if (Nodes[Nod - 1].Key) + Cp->SetKeyValue(row, jsp, Nodes[Nod - 1].Key); + + break; + case TYPE_JVAL: + default: + Cp->SetValueVal(row, jsp); + } // endswitch Type + + break; + default: // ?????????? + sprintf(g->Message, "Invalid column type %d", Buf_Type); + } // endswitch Type + +} // end of WriteColumn + +/* -------------------------- Class TDBBSON -------------------------- */ + +/***********************************************************************/ +/* Implementation of the TDBBSON class. 
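*/
// Editor's aside: an informal model (hypothetical helper, no bounds checks)
// of the rewrite GetJpath above performs for MongoDB-backed tables:
//   "$.store.book[1].title" -> "store.book.1.title"   (value lookup)
//   same path, proj == true -> "store.book.title"     (projection: index
//                                                      positions are dropped)
#include <cctype>

static void JpathToMongoDemo(const char *in, bool proj, char *out)
{
  if (*in == '$') in++;
  if (*in == '.') in++;

  for (const char *p = in; *p; p++)
    switch (*p) {
    case '[':
      *out++ = '.';                       // bracket step becomes a dot step
      while (isdigit((unsigned char)p[1]) && !proj)
        *out++ = *++p;                    // keep the index for lookups only
      while (p[1] && p[1] != ']')
        p++;                              // pass over anything not copied
      if (p[1] == ']') p++;               // consume the closing bracket
      if (proj && p[1] == '.') p++;       // and avoid a doubled dot
      break;
    default:
      *out++ = *p;
    } // endswitch *p

  *out = 0;
} // end of JpathToMongoDemo
/* --- end of editor's aside; original comment box resumes ---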
*/ +/***********************************************************************/ +TDBBSON::TDBBSON(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBBSN(g, tdp, txfp) +{ + Docp = NULL; + Multiple = tdp->Multiple; + Done = Changed = false; + Bp->SetPretty(2); +} // end of TDBBSON standard constructor + +TDBBSON::TDBBSON(PBTDB tdbp) : TDBBSN(tdbp) +{ + Docp = tdbp->Docp; + Multiple = tdbp->Multiple; + Done = tdbp->Done; + Changed = tdbp->Changed; +} // end of TDBBSON copy constructor + +// Used for update +PTDB TDBBSON::Clone(PTABS t) +{ + PTDB tp; + PBSCOL cp1, cp2; + PGLOBAL g = t->G; + + tp = new(g) TDBBSON(this); + + for (cp1 = (PBSCOL)Columns; cp1; cp1 = (PBSCOL)cp1->GetNext()) { + cp2 = new(g) BSONCOL(cp1, tp); // Make a copy + NewPointer(t, cp1, cp2); + } // endfor cp1 + + return tp; +} // end of Clone + +/***********************************************************************/ +/* Make the document tree from the object path. */ +/***********************************************************************/ +int TDBBSON::MakeNewDoc(PGLOBAL g) +{ + // Create a void table that will be populated + Docp = Bp->NewVal(TYPE_JAR); + + if (!(Top = Bp->MakeTopTree(g, TYPE_JAR))) + return RC_FX; + + Docp = Row; + Done = true; + return RC_OK; +} // end of MakeNewDoc + +/***********************************************************************/ +/* Make the document tree from a file. */ +/***********************************************************************/ +int TDBBSON::MakeDocument(PGLOBAL g) +{ + char *p, *p1, *p2, *memory, *objpath, *key = NULL; + int i = 0; + size_t len; + my_bool a; + MODE mode = Mode; + PBVAL jsp; + PBVAL objp = NULL; + PBVAL arp = NULL; + PBVAL val = NULL; + + if (Done) + return RC_OK; + + /*********************************************************************/ + /* Create the mapping file object in mode read. */ + /*********************************************************************/ + Mode = MODE_READ; + + if (!Txfp->OpenTableFile(g)) { + PFBLOCK fp = Txfp->GetTo_Fb(); + + if (fp) { + len = fp->Length; + memory = fp->Memory; + } else { + Mode = mode; // Restore saved Mode + return MakeNewDoc(g); + } // endif fp + + } else + return RC_FX; + + /*********************************************************************/ + /* Parse the json file and allocate its tree structure. */ + /*********************************************************************/ + g->Message[0] = 0; + jsp = Top = Bp->ParseJson(g, memory, len); + Txfp->CloseTableFile(g, false); + Mode = mode; // Restore saved Mode + + if (!jsp && g->Message[0]) + return RC_FX; + + if ((objpath = PlugDup(g, Objname))) { + if (*objpath == '$') objpath++; + if (*objpath == '.') objpath++; + p1 = (*objpath == '[') ? objpath++ : NULL; + + /*********************************************************************/ + /* Find the table in the tree structure. */ + /*********************************************************************/ + for (p = objpath; jsp && p; p = (p2 ? 
p2 : NULL)) { + a = (p1 != NULL); + p1 = strchr(p, '['); + p2 = strchr(p, '.'); + + if (!p2) + p2 = p1; + else if (p1) { + if (p1 < p2) + p2 = p1; + else if (p1 == p2 + 1) + *p2++ = 0; // Old syntax .[ + else + p1 = NULL; + + } // endif p1 + + if (p2) + *p2++ = 0; + + if (!a && *p && *p != '[' && !IsNum(p)) { + // obj is a key + if (jsp->Type != TYPE_JOB) { + strcpy(g->Message, "Table path does not match the json file"); + return RC_FX; + } // endif Type + + key = p; + objp = jsp; + arp = NULL; + val = Bp->GetKeyValue(objp, key); + + if (!val || !(jsp = Bp->GetBson(val))) { + sprintf(g->Message, "Cannot find object key %s", key); + return RC_FX; + } // endif val + + } else { + if (*p == '[') { + // Old style + if (p[strlen(p) - 1] != ']') { + sprintf(g->Message, "Invalid Table path near %s", p); + return RC_FX; + } else + p++; + + } // endif p + + if (jsp->Type != TYPE_JAR) { + strcpy(g->Message, "Table path does not match the json file"); + return RC_FX; + } // endif Type + + arp = jsp; + objp = NULL; + i = atoi(p) - B; + val = Bp->GetArrayValue(arp, i); + + if (!val) { + sprintf(g->Message, "Cannot find array value %d", i); + return RC_FX; + } // endif val + + } // endif + + jsp = val; + } // endfor p + + } // endif objpath + + if (jsp && jsp->Type == TYPE_JAR) + Docp = jsp; + else { + // The table is void or is just one object or one value + if (objp) { + Docp = Bp->GetKeyValue(objp, key); + Docp->To_Val = Bp->MOF(Bp->DupVal(Docp)); + Docp->Type = TYPE_JAR; + } else if (arp) { + Docp = Bp->NewVal(TYPE_JAR); + Bp->AddArrayValue(Docp, jsp); + Bp->SetArrayValue(arp, Docp, i); + } else { + Top = Docp = Bp->NewVal(TYPE_JAR); + Bp->AddArrayValue(Docp, jsp); + } // endif's + + } // endif jsp + + Done = true; + return RC_OK; +} // end of MakeDocument + +/***********************************************************************/ +/* JSON Cardinality: returns table size in number of rows. */ +/***********************************************************************/ +int TDBBSON::Cardinality(PGLOBAL g) +{ + if (!g) + return (Xcol || Multiple) ? 0 : 1; + else if (Cardinal < 0) { + if (!Multiple) { + if (MakeDocument(g) == RC_OK) + Cardinal = Bp->GetSize(Docp); + + } else + return 10; + + } // endif Cardinal + + return Cardinal; +} // end of Cardinality + +/***********************************************************************/ +/* JSON GetMaxSize: returns table size estimate in number of rows. */ +/***********************************************************************/ +int TDBBSON::GetMaxSize(PGLOBAL g) +{ + if (MaxSize < 0) + MaxSize = Cardinality(g) * ((Xcol) ? Limit : 1); + + return MaxSize; +} // end of GetMaxSize + +/***********************************************************************/ +/* ResetSize: call by TDBMUL when calculating size estimate. */ +/***********************************************************************/ +void TDBBSON::ResetSize(void) +{ + MaxSize = Cardinal = -1; + Fpos = -1; + N = 0; + Done = false; +} // end of ResetSize + +/***********************************************************************/ +/* TDBBSON is not indexable. */ +/***********************************************************************/ +int TDBBSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool) +{ + if (pxdf) { + strcpy(g->Message, "JSON not indexable when pretty = 2"); + return RC_FX; + } else + return RC_OK; + +} // end of MakeIndex + +/***********************************************************************/ +/* Return the position in the table. 
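*/
// Editor's aside: GetRecpos/SetRecpos below currently round-trip only Fpos,
// but the #if 0 remnants show the intent of also packing NextSame into the
// single int a sorted index can store. A hedged sketch of that packing idea,
// with hypothetical names, assuming NextSame fits in one byte and row
// positions stay below 2^24:
#include <cstdint>

static int32_t PackRecposDemo(int32_t fpos, uint8_t nextsame)
{
  return (int32_t)(((uint32_t)fpos & 0xFFFFFFu) | ((uint32_t)nextsame << 24));
} // end of PackRecposDemo

static void UnpackRecposDemo(int32_t recpos, int32_t *fpos, uint8_t *nextsame)
{
  *nextsame = (uint8_t)(((uint32_t)recpos >> 24) & 0xFFu);
  *fpos     = recpos & 0x00FFFFFF;
} // end of UnpackRecposDemo
/* --- end of editor's aside; original comment box resumes ---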
*/ +/***********************************************************************/ +int TDBBSON::GetRecpos(void) +{ +#if 0 + union { + uint Rpos; + BYTE Spos[4]; + }; + + Rpos = htonl(Fpos); + Spos[0] = (BYTE)NextSame; + return Rpos; +#endif // 0 + return Fpos; +} // end of GetRecpos + +/***********************************************************************/ +/* Set the position in the table. */ +/***********************************************************************/ +bool TDBBSON::SetRecpos(PGLOBAL, int recpos) +{ +#if 0 + union { + uint Rpos; + BYTE Spos[4]; + }; + + Rpos = recpos; + NextSame = Spos[0]; + Spos[0] = 0; + Fpos = (signed)ntohl(Rpos); + + //if (Fpos != (signed)ntohl(Rpos)) { + // Fpos = ntohl(Rpos); + // same = false; + //} else + // same = true; +#endif // 0 + + Fpos = recpos - 1; + return false; +} // end of SetRecpos + +/***********************************************************************/ +/* JSON Access Method opening routine. */ +/***********************************************************************/ +bool TDBBSON::OpenDB(PGLOBAL g) +{ + if (Use == USE_OPEN) { + /*******************************************************************/ + /* Table already open replace it at its beginning. */ + /*******************************************************************/ + Fpos = -1; + NextSame = false; + SameRow = 0; + return false; + } // endif use + +/*********************************************************************/ +/* OpenDB: initialize the JSON file processing. */ +/*********************************************************************/ + if (MakeDocument(g) != RC_OK) + return true; + + if (Mode == MODE_INSERT) + switch (Jmode) { + case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; + case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; + case MODE_VALUE: Row = Bp->NewVal(TYPE_JVAL); break; + default: + sprintf(g->Message, "Invalid Jmode %d", Jmode); + return true; + } // endswitch Jmode + + if (Xcol) + To_Filter = NULL; // Imcompatible + + Use = USE_OPEN; + return false; +} // end of OpenDB + +/***********************************************************************/ +/* ReadDB: Data Base read routine for JSON access method. */ +/***********************************************************************/ +int TDBBSON::ReadDB(PGLOBAL) +{ + int rc; + + N++; + + if (NextSame) { + SameRow = NextSame; + NextSame = false; + M++; + rc = RC_OK; + } else if (++Fpos < (signed)Bp->GetSize(Docp)) { + Row = Bp->GetArrayValue(Docp, Fpos); + + if (Row->Type == TYPE_JVAL) + Row = Bp->GetBson(Row); + + SameRow = 0; + M = 1; + rc = RC_OK; + } else + rc = RC_EF; + + return rc; +} // end of ReadDB + +/***********************************************************************/ +/* WriteDB: Data Base write routine for JSON access method. */ +/***********************************************************************/ +int TDBBSON::WriteDB(PGLOBAL g) +{ + if (Mode == MODE_INSERT) { + Bp->AddArrayValue(Docp, Row); + + switch(Jmode) { + case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; + case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; + default: Row = Bp->NewVal(); break; + } // endswitch Jmode + + } else + Bp->SetArrayValue(Docp, Row, Fpos); + + Changed = true; + return RC_OK; +} // end of WriteDB + +/***********************************************************************/ +/* Data Base delete line routine for JSON access method. 
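*/
// Editor's aside: a minimal model, with made-up types, of the cursor logic in
// ReadDB above: Fpos walks the in-memory document array, while NextSame
// re-delivers the current row when an expanded column still has array values
// left to emit.
#include <vector>

struct CursorDemo {
  std::vector<int> doc;     // stands in for the Docp value array
  int fpos = -1;
  bool next_same = false;   // set by an expanded column, cleared on read

  bool Read(int *row)       // true = RC_OK, false = RC_EF
  {
    if (next_same) {
      next_same = false;
      *row = doc[fpos];     // same document row, next multiple value
      return true;
    } // endif next_same

    if (++fpos >= (int)doc.size())
      return false;         // end of file

    *row = doc[fpos];
    return true;
  } // end of Read
}; // end of CursorDemo
/* --- end of editor's aside; original comment box resumes ---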
*/ +/***********************************************************************/ +int TDBBSON::DeleteDB(PGLOBAL g, int irc) +{ + if (irc == RC_OK) + // Deleted current row + Bp->DeleteValue(Docp, Fpos); + else if (irc == RC_FX) + // Delete all + Docp->To_Val = 0; + + Changed = true; + return RC_OK; +} // end of DeleteDB + +/***********************************************************************/ +/* Data Base close routine for JSON access methods. */ +/***********************************************************************/ +void TDBBSON::CloseDB(PGLOBAL g) +{ + if (!Changed) + return; + + // Save the modified document + char filename[_MAX_PATH]; + +//Docp->InitArray(g); + + // We used the file name relative to recorded datapath + PlugSetPath(filename, ((PBDEF)To_Def)->Fn, GetPath()); + + // Serialize the modified table + if (!Bp->Serialize(g, Top, filename, Pretty)) + puts(g->Message); + +} // end of CloseDB + +/* ---------------------------TDBBCL class --------------------------- */ + +/***********************************************************************/ +/* TDBBCL class constructor. */ +/***********************************************************************/ +TDBBCL::TDBBCL(PBDEF tdp) : TDBCAT(tdp) { + Topt = tdp->GetTopt(); + Db = tdp->Schema; + Dsn = tdp->Uri; +} // end of TDBBCL constructor + +/***********************************************************************/ +/* GetResult: Get the list the JSON file columns. */ +/***********************************************************************/ +PQRYRES TDBBCL::GetResult(PGLOBAL g) { + return BSONColumns(g, Db, Dsn, Topt, false); +} // end of GetResult + +/* --------------------------- End of json --------------------------- */ diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h new file mode 100644 index 00000000000..adb02dd28e4 --- /dev/null +++ b/storage/connect/tabbson.h @@ -0,0 +1,339 @@ +/*************** tabbson H Declares Source Code File (.H) **************/ +/* Name: tabbson.h Version 1.0 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2020 */ +/* */ +/* This file contains the BSON classes declares. */ +/***********************************************************************/ +#pragma once +#include "block.h" +#include "colblk.h" +#include "bson.h" +#include "tabjson.h" + +typedef class BTUTIL* PBTUT; +typedef class BCUTIL* PBCUT; +typedef class BSONDEF* PBDEF; +typedef class TDBBSON* PBTDB; +typedef class BSONCOL* PBSCOL; +class TDBBSN; +DllExport PQRYRES BSONColumns(PGLOBAL, PCSZ, PCSZ, PTOS, bool); + +/***********************************************************************/ +/* Class used to get the columns of a mongo collection. */ +/***********************************************************************/ +class BSONDISC : public BLOCK { +public: + // Constructor + BSONDISC(PGLOBAL g, uint* lg); + + // Functions + int GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt); + bool Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j); + void AddColumn(PGLOBAL g); + + // Members + JCOL jcol; + PJCL jcp, fjcp, pjcp; + //PVL vlp; + PBDEF tdp; + TDBBSN *tjnp; + PBTDB tjsp; + PBPR jpp; + PBVAL jsp; + PBPR row; + PBTUT bp; + PCSZ sep; + char colname[65], fmt[129], buf[16]; + uint *length; + int i, n, bf, ncol, lvl, sz, limit; + bool all, strfy; +}; // end of BSONDISC + +/***********************************************************************/ +/* JSON table. 
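*/
// Editor's aside: with Pretty = 2 the whole document lives in memory and
// CloseDB above flushes it exactly once, and only when something changed. A
// stripped-down model of that write-back contract (hypothetical names):
#include <cstdio>
#include <string>

struct InMemoryDocDemo {
  std::string doc;          // the serialized document tree
  bool changed = false;

  void Update(const std::string &s) { doc = s; changed = true; }

  void Close(const char *filename)
  {
    if (!changed)
      return;               // read-only use: never rewrite the file

    if (FILE *f = fopen(filename, "w")) {
      fputs(doc.c_str(), f);  // one serialization of the whole tree
      fclose(f);
    } // endif f
  } // end of Close
}; // end of InMemoryDocDemo
/* --- end of editor's aside; original comment box resumes ---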
*/ +/***********************************************************************/ +class DllExport BSONDEF : public DOSDEF { /* Table description */ + friend class TDBBSON; + friend class TDBBSN; + friend class TDBBCL; + friend class BSONDISC; + friend class BSONCOL; +#if defined(CMGO_SUPPORT) + friend class CMGFAM; +#endif // CMGO_SUPPORT +#if defined(JAVA_SUPPORT) + friend class JMGFAM; +#endif // JAVA_SUPPORT +public: + // Constructor + BSONDEF(void); + + // Implementation + virtual const char* GetType(void) { return "BSON"; } + + // Methods + virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); + virtual PTDB GetTable(PGLOBAL g, MODE m); + +protected: + // Members + PGLOBAL G; /* Bson utility memory */ + JMODE Jmode; /* MODE_OBJECT by default */ + PCSZ Objname; /* Name of first level object */ + PCSZ Xcol; /* Name of expandable column */ + int Limit; /* Limit of multiple values */ + int Pretty; /* Depends on file structure */ + int Base; /* The array index base */ + bool Strict; /* Strict syntax checking */ + char Sep; /* The Jpath separator */ + const char* Uri; /* MongoDB connection URI */ + PCSZ Collname; /* External collection name */ + PSZ Options; /* Colist ; Pipe */ + PSZ Filter; /* Filter */ + PSZ Driver; /* MongoDB Driver (C or JAVA) */ + bool Pipe; /* True if Colist is a pipeline */ + int Version; /* Driver version */ + PSZ Wrapname; /* MongoDB java wrapper name */ +}; // end of BSONDEF + + +/* -------------------------- BTUTIL class --------------------------- */ + +/***********************************************************************/ +/* Handles all BJSON actions for a BSON table. */ +/***********************************************************************/ +class BTUTIL : public BDOC { +public: + // Constructor + BTUTIL(PGLOBAL G, TDBBSN* tp) : BDOC(G) { Tp = tp; } + + // Utility functions + PBVAL FindRow(PGLOBAL g); + PBVAL ParseLine(PGLOBAL g, int prty, bool cma); + PBVAL MakeTopTree(PGLOBAL g, int type); + PSZ SerialVal(PGLOBAL g, PBVAL top, int pretty); + +protected: + // Members + TDBBSN* Tp; +}; // end of class BTUTIL + +/* -------------------------- BCUTIL class --------------------------- */ + +/***********************************************************************/ +/* Handles all BJSON actions for a BSON columns. */ +/***********************************************************************/ +class BCUTIL : public BTUTIL { +public: + // Constructor + BCUTIL(PGLOBAL G, PBSCOL cp, TDBBSN* tp) : BTUTIL(G, tp) + { Cp = cp; Jb = false; } + + // Utility functions + void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp); + PBVAL MakeBson(PGLOBAL g, PBVAL jsp, int n); + PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i); + PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); + PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); + PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); + PBVAL GetRow(PGLOBAL g); + +protected: + // Member + PBSCOL Cp; + bool Jb; +}; // end of class BCUTIL + + /* -------------------------- TDBBSN class --------------------------- */ + +/***********************************************************************/ +/* This is the BSN Access Method class declaration. */ +/* The table is a DOS file, each record being a JSON object. 
*/ +/***********************************************************************/ +class DllExport TDBBSN : public TDBDOS { + friend class BSONCOL; + friend class BSONDEF; + friend class BTUTIL; + friend class BCUTIL; + friend class BSONDISC; +#if defined(CMGO_SUPPORT) + friend class CMGFAM; +#endif // CMGO_SUPPORT +#if defined(JAVA_SUPPORT) + friend class JMGFAM; +#endif // JAVA_SUPPORT +public: + // Constructor + TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp); + TDBBSN(TDBBSN* tdbp); + + // Implementation + virtual AMT GetAmType(void) { return TYPE_AM_JSN; } + virtual bool SkipHeader(PGLOBAL g); + virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g) TDBBSN(this); } + PBVAL GetRow(void) { return Row; } + + // Methods + virtual PTDB Clone(PTABS t); + virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); + virtual PCOL InsertSpecialColumn(PCOL colp); + virtual int RowNumber(PGLOBAL g, bool b = FALSE) {return (b) ? M : N;} + virtual bool CanBeFiltered(void) + {return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;} + + // Database routines + virtual int Cardinality(PGLOBAL g); + virtual int GetMaxSize(PGLOBAL g); + virtual bool OpenDB(PGLOBAL g); + virtual int ReadDB(PGLOBAL g); + virtual bool PrepareWriting(PGLOBAL g); + virtual int WriteDB(PGLOBAL g); + virtual void CloseDB(PGLOBAL g); + + // Specific routine + virtual int EstimatedLength(void); + +protected: + PBVAL FindRow(PGLOBAL g); +//int MakeTopTree(PGLOBAL g, PBVAL jsp); + + // Members + PBTUT Bp; // The BSUTIL handling class + PBVAL Top; // The top JSON tree + PBVAL Row; // The current row + PBSCOL Colp; // The multiple column + JMODE Jmode; // MODE_OBJECT by default + PCSZ Objname; // The table object name + PCSZ Xcol; // Name of expandable column + int Fpos; // The current row index + int N; // The current Rownum + int M; // Index of multiple value + int Limit; // Limit of multiple values + int Pretty; // Depends on file structure + int NextSame; // Same next row + int SameRow; // Same row nb + int Xval; // Index of expandable array + int B; // Array index base + char Sep; // The Jpath separator + bool Strict; // Strict syntax checking + bool Comma; // Row has final comma +}; // end of class TDBBSN + +/* -------------------------- BSONCOL class -------------------------- */ + +/***********************************************************************/ +/* Class BSONCOL: JSON access method column descriptor. 
*/ +/***********************************************************************/ +class DllExport BSONCOL : public DOSCOL { + friend class TDBBSN; + friend class TDBBSON; + friend class BCUTIL; +#if defined(CMGO_SUPPORT) + friend class CMGFAM; +#endif // CMGO_SUPPORT +#if defined(JAVA_SUPPORT) + friend class JMGFAM; +#endif // JAVA_SUPPORT +public: + // Constructors + BSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i); + BSONCOL(BSONCOL* colp, PTDB tdbp); // Constructor used in copy process + + // Implementation + virtual int GetAmType(void) { return Tbp->GetAmType(); } + + // Methods + virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); + bool ParseJpath(PGLOBAL g); + virtual PSZ GetJpath(PGLOBAL g, bool proj); + virtual void ReadColumn(PGLOBAL g); + virtual void WriteColumn(PGLOBAL g); + +protected: + bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b); + bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); + + // Default constructor not to be used + BSONCOL(void) {} + + // Members + TDBBSN *Tbp; // To the JSN table block + PBCUT Cp; // To the BCUTIL handling class + PVAL MulVal; // To value used by multiple column + char *Jpath; // The json path + JNODE *Nodes; // The intermediate objects + int Nod; // The number of intermediate objects + int Xnod; // Index of multiple values + char Sep; // The Jpath separator + bool Xpd; // True for expandable column + bool Parsed; // True when parsed + bool Warned; // True when warning issued +}; // end of class BSONCOL + +/* -------------------------- TDBBSON class -------------------------- */ + +/***********************************************************************/ +/* This is the JSON Access Method class declaration. */ +/***********************************************************************/ +class DllExport TDBBSON : public TDBBSN { + friend class BSONDEF; + friend class BSONCOL; +public: + // Constructor + TDBBSON(PGLOBAL g, PBDEF tdp, PTXF txfp); + TDBBSON(PBTDB tdbp); + + // Implementation + virtual AMT GetAmType(void) { return TYPE_AM_JSON; } + virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g) TDBBSON(this); } + PBVAL GetDoc(void) { return Docp; } + + // Methods + virtual PTDB Clone(PTABS t); + + // Database routines + virtual int Cardinality(PGLOBAL g); + virtual int GetMaxSize(PGLOBAL g); + virtual void ResetSize(void); + virtual int GetProgCur(void) { return N; } + virtual int GetRecpos(void); + virtual bool SetRecpos(PGLOBAL g, int recpos); + virtual bool OpenDB(PGLOBAL g); + virtual int ReadDB(PGLOBAL g); + virtual bool PrepareWriting(PGLOBAL g) { return false; } + virtual int WriteDB(PGLOBAL g); + virtual int DeleteDB(PGLOBAL g, int irc); + virtual void CloseDB(PGLOBAL g); + int MakeDocument(PGLOBAL g); + + // Optimization routines + virtual int MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add); + +protected: + int MakeNewDoc(PGLOBAL g); + + // Members + PBVAL Docp; // The document array + int Multiple; // 0: No 1: DIR 2: Section 3: filelist + bool Done; // True when document parsing is done + bool Changed; // After Update, Insert or Delete +}; // end of class TDBBSON + +/***********************************************************************/ +/* This is the class declaration for the JSON catalog table. 
*/ +/***********************************************************************/ +class DllExport TDBBCL : public TDBCAT { +public: + // Constructor + TDBBCL(PBDEF tdp); + +protected: + // Specific routines + virtual PQRYRES GetResult(PGLOBAL g); + + // Members + PTOS Topt; + PCSZ Db; + PCSZ Dsn; +}; // end of class TDBBCL diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index 3002f8906ed..fa764b1f84d 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -1995,7 +1995,7 @@ int TDBDOS::Cardinality(PGLOBAL g) if (Mode == MODE_ANY && ExactInfo()) { // Using index impossible or failed, do it the hard way Mode = MODE_READ; - To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1); + To_Line = (char*)PlugSubAlloc(g, NULL, (size_t)Lrecl + 1); if (Txfp->OpenTableFile(g)) return (Cardinal = Txfp->Cardinality(g)); @@ -2145,6 +2145,9 @@ bool TDBDOS::OpenDB(PGLOBAL g) } // endif use if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS +#if defined(BSON_SUPPORT) + && Txfp->GetAmType() != TYPE_AM_BIN +#endif // BSON_SUPPORT && Txfp->GetAmType() != TYPE_AM_MGO) { // Delete all lines. Not handled in MAP or block mode Txfp = new(g) DOSFAM((PDOSDEF)To_Def); @@ -2229,7 +2232,7 @@ int TDBDOS::ReadDB(PGLOBAL g) return RC_EF; case -2: // No match for join return RC_NF; - case -3: // Same record as last non null one + case -3: // Same record as non null last one num_there++; return RC_OK; default: diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index eed67525f78..37d28b96517 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -67,7 +67,7 @@ /* This should be an option. */ /***********************************************************************/ #define MAXCOL 200 /* Default max column nb in result */ -#define TYPE_UNKNOWN 10 /* Must be greater than other types */ +#define TYPE_UNKNOWN 12 /* Must be greater than other types */ /***********************************************************************/ /* External function. */ @@ -311,14 +311,14 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) } else if (*p == q) { if (phase == 0) { - if (blank) - { + if (blank) { if (++nerr > mxr) { sprintf(g->Message, MSG(MISPLACED_QUOTE), num_read); goto err; } else goto skip; } + n = 0; phase = digit = 1; } else if (phase == 1) { @@ -342,14 +342,14 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) goto skip; } else { - if (phase == 2) - { + if (phase == 2) { if (++nerr > mxr) { sprintf(g->Message, MSG(MISPLACED_QUOTE), num_read); goto err; } else goto skip; } + // isdigit cannot be used here because of debug assert if (!strchr("0123456789", *p)) { if (!digit && *p == dechar) @@ -364,14 +364,14 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) blank = 1; } // endif's *p - if (phase == 1) - { + if (phase == 1) { if (++nerr > mxr) { sprintf(g->Message, MSG(UNBALANCE_QUOTE), num_read); goto err; } else goto skip; } + if (n) { len[i] = MY_MAX(len[i], n); type = (digit || n == 0 || (dec && n == 1)) ? 
TYPE_STRING @@ -744,8 +744,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) int i, len; PCSVCOL colp; - if (!Fields) // May have been set in TABFMT::OpenDB - { + if (!Fields) { // May have been set in TABFMT::OpenDB if (Mode != MODE_UPDATE && Mode != MODE_INSERT) { for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next) if (!colp->IsSpecial() && !colp->IsVirtual()) @@ -759,6 +758,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) if (!cdp->IsSpecial() && !cdp->IsVirtual()) Fields++; } + Offset = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields); Fldlen = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields); @@ -778,8 +778,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) } // endfor i - if (Field) - { + if (Field) { // Prepare writing fields if (Mode != MODE_UPDATE) { for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next) @@ -803,6 +802,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) Fldtyp[i] = IsTypeNum(cdp->GetType()); } // endif cdp } + } // endif Use if (Header) { @@ -1051,8 +1051,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) if (i) strcat(To_Line, sep); - if (Field[i]) - { + if (Field[i]) { if (!strlen(Field[i])) { // Generally null fields are not quoted if (Quoted > 2) @@ -1060,7 +1059,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) strcat(strcat(To_Line, qot), qot); } else if (Qot && (strchr(Field[i], Sep) || *Field[i] == Qot - || Quoted > 1 || (Quoted == 1 && !Fldtyp[i]))) + || Quoted > 1 || (Quoted == 1 && !Fldtyp[i]))) { if (strchr(Field[i], Qot)) { // Field contains quotes that must be doubled int j, k = strlen(To_Line), n = strlen(Field[i]); @@ -1078,10 +1077,12 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) To_Line[k] = '\0'; } else strcat(strcat(strcat(To_Line, qot), Field[i]), qot); + } else strcat(To_Line, Field[i]); } + } // endfor i #if defined(_DEBUG) diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index dbcd590c3de..402a0a1de37 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1,8 +1,9 @@ /************* tabjson C++ Program Source Code File (.CPP) *************/ -/* PROGRAM NAME: tabjson Version 1.7 */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2019 */ +/* PROGRAM NAME: tabjson Version 1.8 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */ /* This program are the JSON class DB execution routines. */ /***********************************************************************/ +#undef BSON_SUPPORT /***********************************************************************/ /* Include relevant sections of the MariaDB header file. */ @@ -46,7 +47,7 @@ /* This should be an option. */ /***********************************************************************/ #define MAXCOL 200 /* Default max column nb in result */ -#define TYPE_UNKNOWN 12 /* Must be greater than other types */ +//#define TYPE_UNKNOWN 12 /* Must be greater than other types */ /***********************************************************************/ /* External functions. 
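*/
// Editor's aside: the CSV quoting logic above (column detection in
// CSVColumns, field writing in TDBCSV::PrepareWriting) tracks quoting with a
// small phase variable: 0 = outside quotes, 1 = inside an open quote, 2 =
// just past a closing quote, used to flag misplaced quotes. A compact
// standalone model of the core rule, ignoring doubled quotes and error
// counting (demo only):
static int CountCsvFieldsDemo(const char *line, char sep, char q)
{
  int n = 1;
  bool quoted = false;

  for (const char *p = line; *p; p++)
    if (*p == q)
      quoted = !quoted;     // toggle on every quote character
    else if (*p == sep && !quoted)
      n++;                  // a separator splits fields only when unquoted

  return n;
} // end of CountCsvFieldsDemo
/* --- end of editor's aside; original comment box resumes ---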
*/ @@ -114,7 +115,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) /*********************************************************************/ for (i = 0, jcp = pjdc->fjcp; jcp; i++, jcp = jcp->Next) { if (jcp->Type == TYPE_UNKNOWN) - jcp->Type = TYPE_STRING; // Void column + jcp->Type = TYPE_STRG; // Void column crp = qrp->Colresp; // Column Name crp->Kdata->SetValue(jcp->Name, i); @@ -152,26 +153,29 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg) { length = lg; jcp = fjcp = pjcp = NULL; + tdp = NULL; tjnp = NULL; jpp = NULL; tjsp = NULL; jsp = NULL; row = NULL; sep = NULL; - i = n = bf = ncol = lvl = sz = 0; + i = n = bf = ncol = lvl = sz = limit = 0; all = strfy = false; } // end of JSONDISC constructor int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) { - char filename[_MAX_PATH]; - bool mgo = (GetTypeID(topt->type) == TAB_MONGO); + char filename[_MAX_PATH]; + bool mgo = (GetTypeID(topt->type) == TAB_MONGO); + PGLOBAL G = NULL; lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth()); lvl = GetIntegerTableOption(g, topt, "Depth", lvl); sep = GetStringTableOption(g, topt, "Separator", "."); sz = GetIntegerTableOption(g, topt, "Jsize", 1024); - strfy = GetBooleanTableOption(g, topt, "Stringify", false); + limit = GetIntegerTableOption(g, topt, "Limit", 10); + strfy = GetBooleanTableOption(g, topt, "Stringify", false); /*********************************************************************/ /* Open the input file. */ @@ -240,7 +244,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) if (tjsp->MakeDocument(g)) return 0; - jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetValue(0) : NULL; + jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetArrayValue(0) : NULL; } else { if (!(tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0))) { @@ -286,18 +290,15 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) #endif } // endif Driver - } else + } else if (tdp->Pretty >= 0) tjnp = new(g) TDBJSN(tdp, new(g) DOSFAM(tdp)); + else + tjnp = new(g) TDBJSN(tdp, new(g) BINFAM(tdp)); tjnp->SetMode(MODE_READ); // Allocate the parse work memory - PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); - memset(G, 0, sizeof(GLOBAL)); - G->Sarea_Size = (size_t)tdp->Lrecl * 10; - G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); - PlugSubSet(G->Sarea, G->Sarea_Size); - G->jump_level = 0; + G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 10 : 2)); tjnp->SetG(G); if (tjnp->OpenDB(g)) @@ -309,7 +310,8 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) case RC_FX: goto err; default: - jsp = tjnp->GetRow(); +// jsp = tjnp->FindRow(g); // FindRow was done in ReadDB + jsp = tjnp->Row; } // endswitch ReadDB } // endif pretty @@ -335,11 +337,11 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) /* Analyse the JSON tree and define columns. 
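*/
// Editor's aside: a toy model of the discovery walk the loop below performs,
// using its own mini JSON type rather than the engine's PJSON: nested object
// keys are appended to the column name with '_' and leaves become candidate
// columns. All names here are hypothetical.
#include <cstdio>
#include <map>
#include <string>

struct JDemo {                          // a node is a leaf when kids is empty
  std::map<std::string, JDemo> kids;
};

static void FindDemo(const JDemo &j, const std::string &colname)
{
  if (j.kids.empty()) {
    printf("column: %s\n", colname.c_str());
    return;
  } // endif leaf

  for (const auto &kv : j.kids)
    FindDemo(kv.second, colname.empty() ? kv.first
                                        : colname + "_" + kv.first);
} // end of FindDemo
/* --- end of editor's aside; original comment box resumes ---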
*/ /*********************************************************************/ for (i = 1; ; i++) { - for (jpp = row->GetFirst(); jpp; jpp = jpp->GetNext()) { - strncpy(colname, jpp->GetKey(), 64); + for (jpp = row->GetFirst(); jpp; jpp = jpp->Next) { + strncpy(colname, jpp->Key, 64); fmt[bf] = 0; - if (Find(g, jpp->GetVal(), colname, MY_MIN(lvl, 0))) + if (Find(g, jpp->Val, colname, MY_MIN(lvl, 0))) goto err; } // endfor jpp @@ -359,11 +361,12 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) case RC_FX: goto err; default: - jsp = tjnp->GetRow(); +// jsp = tjnp->FindRow(g); + jsp = tjnp->Row; } // endswitch ReadDB } else - jsp = tjsp->GetDoc()->GetValue(i); + jsp = tjsp->GetDoc()->GetArrayValue(i); if (!(row = (jsp) ? jsp->GetObject() : NULL)) break; @@ -390,14 +393,35 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) PJOB job; PJAR jar; - if ((valp = jvp ? jvp->GetValue() : NULL)) { - if (JsonAllPath() && !fmt[bf]) + if (jvp && jvp->DataType != TYPE_JSON) { + if (JsonAllPath() && !fmt[bf]) strcat(fmt, colname); - jcol.Type = valp->GetType(); - jcol.Len = valp->GetValLen(); - jcol.Scale = valp->GetValPrec(); - jcol.Cbn = valp->IsNull(); + jcol.Type = jvp->DataType; + + switch (jvp->DataType) { + case TYPE_STRG: + case TYPE_DTM: + jcol.Len = (int)strlen(jvp->Strp); + break; + case TYPE_INTG: + case TYPE_BINT: + jcol.Len = (int)strlen(jvp->GetString(g)); + break; + case TYPE_DBL: + jcol.Len = (int)strlen(jvp->GetString(g)); + jcol.Scale = jvp->Nd; + break; + case TYPE_BOOL: + jcol.Len = 1; + break; + default: + jcol.Len = 0; + break; + } // endswitch Type + + jcol.Scale = jvp->Nd; + jcol.Cbn = jvp->DataType == TYPE_NULL; } else if (!jvp || jvp->IsNull()) { jcol.Type = TYPE_UNKNOWN; jcol.Len = jcol.Scale = 0; @@ -413,8 +437,8 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) case TYPE_JOB: job = (PJOB)jsp; - for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->GetNext()) { - PCSZ k = jrp->GetKey(); + for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->Next) { + PCSZ k = jrp->Key; if (*k != '$') { n = sizeof(fmt) - strlen(fmt) -1; @@ -423,7 +447,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) strncat(strncat(colname, "_", n), k, n - 1); } // endif Key - if (Find(g, jrp->GetVal(), k, j + 1)) + if (Find(g, jrp->Val, k, j + 1)) return true; *p = *pc = 0; @@ -434,7 +458,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) jar = (PJAR)jsp; if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key))) - ars = jar->GetSize(false); + ars = MY_MIN(jar->GetSize(false), limit); else ars = MY_MIN(jar->GetSize(false), 1); @@ -460,7 +484,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) strncat(fmt, (tdp->Uri ? 
sep : "[*]"), n); } - if (Find(g, jar->GetValue(k), "", j)) + if (Find(g, jar->GetArrayValue(k), "", j)) return true; *p = *pc = 0; @@ -481,7 +505,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) } else if (JsonAllPath() && !fmt[bf]) strcat(fmt, colname); - jcol.Type = TYPE_STRING; + jcol.Type = TYPE_STRG; jcol.Len = sz; jcol.Scale = 0; jcol.Cbn = true; @@ -503,10 +527,29 @@ void JSONDISC::AddColumn(PGLOBAL g) if (jcp) { if (jcp->Type != jcol.Type) { - if (jcp->Type == TYPE_UNKNOWN) + if (jcp->Type == TYPE_UNKNOWN || jcp->Type == TYPE_NULL) jcp->Type = jcol.Type; - else if (jcol.Type != TYPE_UNKNOWN) - jcp->Type = TYPE_STRING; +// else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID) +// jcp->Type = TYPE_STRING; + else if (jcp->Type != TYPE_STRG) + switch (jcol.Type) { + case TYPE_STRG: + case TYPE_DBL: + jcp->Type = jcol.Type; + break; + case TYPE_BINT: + if (jcp->Type == TYPE_INTG || jcp->Type == TYPE_BOOL) + jcp->Type = jcol.Type; + + break; + case TYPE_INTG: + if (jcp->Type == TYPE_BOOL) + jcp->Type = jcol.Type; + + break; + default: + break; + } // endswith Type } // endif Type @@ -625,9 +668,9 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) PTXF txfp = NULL; // JSN not used for pretty=1 for insert or delete - if (!Pretty || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) { + if (Pretty <= 0 || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) { USETEMP tmp = UseTemp(); - bool map = Mapped && m != MODE_INSERT && + bool map = Mapped && Pretty >= 0 && m != MODE_INSERT && !(tmp != TMP_NO && m == MODE_UPDATE) && !(tmp == TMP_FORCE && (m == MODE_UPDATE || m == MODE_DELETE)); @@ -684,21 +727,26 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) #endif // !GZ_SUPPORT } else if (map) txfp = new(g) MAPFAM(this); - else + else if (Pretty < 0) // BJsonfile + txfp = new(g) BINFAM(this); + else txfp = new(g) DOSFAM(this); - // Txfp must be set for TDBDOS + // Txfp must be set for TDBJSN tdbp = new(g) TDBJSN(this, txfp); if (Lrecl) { // Allocate the parse work memory +#if 0 PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); memset(G, 0, sizeof(GLOBAL)); - G->Sarea_Size = Lrecl * 10; + G->Sarea_Size = (size_t)Lrecl * 10; G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); PlugSubSet(G->Sarea, G->Sarea_Size); G->jump_level = 0; ((TDBJSN*)tdbp)->G = G; +#endif // 0 + ((TDBJSN*)tdbp)->G = PlugInit(NULL, (size_t)Lrecl * (Pretty >= 0 ? 10 : 2)); } else { strcpy(g->Message, "LRECL is not defined"); return NULL; @@ -736,10 +784,10 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) /* --------------------------- Class TDBJSN -------------------------- */ /***********************************************************************/ -/* Implementation of the TDBJSN class. 
*/ +/* Implementation of the TDBJSN class (Pretty < 2) */ /***********************************************************************/ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) - { +{ G = NULL; Top = NULL; Row = NULL; @@ -772,35 +820,35 @@ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) SameRow = 0; Xval = -1; Comma = false; - } // end of TDBJSN standard constructor +} // end of TDBJSN standard constructor -TDBJSN::TDBJSN(TDBJSN *tdbp) : TDBDOS(NULL, tdbp) - { - G = NULL; - Top = tdbp->Top; - Row = tdbp->Row; - Val = tdbp->Val; - Colp = tdbp->Colp; - Jmode = tdbp->Jmode; - Objname = tdbp->Objname; - Xcol = tdbp->Xcol; - Fpos = tdbp->Fpos; - N = tdbp->N; - M = tdbp->M; - Limit = tdbp->Limit; - NextSame = tdbp->NextSame; - SameRow = tdbp->SameRow; - Xval = tdbp->Xval; - B = tdbp->B; - Sep = tdbp->Sep; - Pretty = tdbp->Pretty; - Strict = tdbp->Strict; - Comma = tdbp->Comma; - } // end of TDBJSN copy constructor +TDBJSN::TDBJSN(TDBJSN* tdbp) : TDBDOS(NULL, tdbp) +{ + G = NULL; + Top = tdbp->Top; + Row = tdbp->Row; + Val = tdbp->Val; + Colp = tdbp->Colp; + Jmode = tdbp->Jmode; + Objname = tdbp->Objname; + Xcol = tdbp->Xcol; + Fpos = tdbp->Fpos; + N = tdbp->N; + M = tdbp->M; + Limit = tdbp->Limit; + NextSame = tdbp->NextSame; + SameRow = tdbp->SameRow; + Xval = tdbp->Xval; + B = tdbp->B; + Sep = tdbp->Sep; + Pretty = tdbp->Pretty; + Strict = tdbp->Strict; + Comma = tdbp->Comma; +} // end of TDBJSN copy constructor // Used for update PTDB TDBJSN::Clone(PTABS t) - { +{ G = NULL; PTDB tp; PJCOL cp1, cp2; @@ -814,23 +862,23 @@ PTDB TDBJSN::Clone(PTABS t) } // endfor cp1 return tp; - } // end of Clone +} // end of Clone /***********************************************************************/ /* Allocate JSN column description block. */ /***********************************************************************/ PCOL TDBJSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) - { +{ PJCOL colp = new(g) JSONCOL(g, cdp, this, cprec, n); return (colp->ParseJpath(g)) ? NULL : colp; - } // end of MakeCol +} // end of MakeCol /***********************************************************************/ /* InsertSpecialColumn: Put a special column ahead of the column list.*/ /***********************************************************************/ PCOL TDBJSN::InsertSpecialColumn(PCOL colp) - { +{ if (!colp->IsSpecial()) return NULL; @@ -840,31 +888,47 @@ PCOL TDBJSN::InsertSpecialColumn(PCOL colp) colp->SetNext(Columns); Columns = colp; return colp; - } // end of InsertSpecialColumn +} // end of InsertSpecialColumn +#if 0 /***********************************************************************/ /* JSON Cardinality: returns table size in number of rows. */ /***********************************************************************/ int TDBJSN::Cardinality(PGLOBAL g) - { +{ if (!g) return 0; - else if (Cardinal < 0) - Cardinal = TDBDOS::Cardinality(g); + else if (Cardinal < 0) { + Cardinal = TDBDOS::Cardinality(g); + + } // endif Cardinal return Cardinal; - } // end of Cardinality +} // end of Cardinality /***********************************************************************/ /* JSON GetMaxSize: returns file size estimate in number of lines. */ /***********************************************************************/ int TDBJSN::GetMaxSize(PGLOBAL g) - { - if (MaxSize < 0) - MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1); +{ + if (MaxSize < 0) + MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? 
Limit : 1); return MaxSize; - } // end of GetMaxSize +} // end of GetMaxSize +#endif // 0 + +/***********************************************************************/ +/* JSON EstimatedLength. Returns an estimated minimum line length. */ +/***********************************************************************/ +int TDBJSN::EstimatedLength(void) +{ + if (AvgLen <= 0) + return (Lrecl ? Lrecl : 1024) / 8; // TODO: make it better + else + return AvgLen; + +} // end of Estimated Length /***********************************************************************/ /* Find the row in the tree structure. */ @@ -881,7 +945,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g) if (*objpath != '[' && !IsNum(objpath)) { // objpass is a key val = (jsp->GetType() == TYPE_JOB) ? - jsp->GetObject()->GetValue(objpath) : NULL; + jsp->GetObject()->GetKeyValue(objpath) : NULL; } else { if (*objpath == '[') { if (objpath[strlen(objpath) - 1] == ']') @@ -891,7 +955,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g) } // endif [ val = (jsp->GetType() == TYPE_JAR) ? - jsp->GetArray()->GetValue(atoi(objpath) - B) : NULL; + jsp->GetArray()->GetArrayValue(atoi(objpath) - B) : NULL; } // endif objpath jsp = (val) ? val->GetJson() : NULL; @@ -904,7 +968,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g) /* OpenDB: Data Base open routine for JSN access method. */ /***********************************************************************/ bool TDBJSN::OpenDB(PGLOBAL g) - { +{ if (Use == USE_OPEN) { /*******************************************************************/ /* Table already open replace it at its beginning. */ @@ -928,7 +992,51 @@ bool TDBJSN::OpenDB(PGLOBAL g) } // endif Use - if (TDBDOS::OpenDB(g)) + if (Pretty < 0) { + /*******************************************************************/ + /* Binary BJSON table. */ + /*******************************************************************/ + xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", + this, Tdb_No, Use, Mode); + + if (Use == USE_OPEN) { + /*******************************************************************/ + /* Table already open, just replace it at its beginning. */ + /*******************************************************************/ + if (!To_Kindex) { + Txfp->Rewind(); // see comment in Work.log + } else // Table is to be accessed through a sorted index table + To_Kindex->Reset(); + + return false; + } // endif use + + /*********************************************************************/ + /* Open according to logical input/output mode required. */ + /* Use conventionnal input/output functions. */ + /*********************************************************************/ + if (Txfp->OpenTableFile(g)) + return true; + + Use = USE_OPEN; // Do it now in case we are recursively called + + /*********************************************************************/ + /* Lrecl is Ok. */ + /*********************************************************************/ + size_t linelen = Lrecl; + MODE mode = Mode; + + // Buffer must be allocated in g->Sarea + Mode = MODE_ANY; + Txfp->AllocateBuffer(g); + Mode = mode; + + //To_Line = (char*)PlugSubAlloc(g, NULL, linelen); + //memset(To_Line, 0, linelen); + To_Line = Txfp->GetBuf(); + xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); + return false; + } else if (TDBDOS::OpenDB(g)) return true; if (Xcol) @@ -943,7 +1051,7 @@ bool TDBJSN::OpenDB(PGLOBAL g) /* Kindex construction if the file is accessed using an index. 
*/ /***********************************************************************/ bool TDBJSN::SkipHeader(PGLOBAL g) - { +{ int len = GetFileLength(g); bool rc = false; @@ -952,62 +1060,71 @@ bool TDBJSN::SkipHeader(PGLOBAL g) return true; #endif // _DEBUG -#if defined(__WIN__) -#define Ending 2 -#else // !__WIN__ -#define Ending 1 -#endif // !__WIN__ - if (Pretty == 1) { if (Mode == MODE_INSERT || Mode == MODE_DELETE) { // Mode Insert and delete are no more handled here - assert(false); - } else if (len) // !Insert && !Delete + DBUG_ASSERT(false); + } else if (len > 0) // !Insert && !Delete rc = (Txfp->SkipRecord(g, false) == RC_FX || Txfp->RecordPos(g)); - } // endif Pretty + } // endif Pretty return rc; - } // end of SkipHeader +} // end of SkipHeader /***********************************************************************/ /* ReadDB: Data Base read routine for JSN access method. */ /***********************************************************************/ -int TDBJSN::ReadDB(PGLOBAL g) - { - int rc; +int TDBJSN::ReadDB(PGLOBAL g) { + int rc; + + N++; + + if (NextSame) { + SameRow = NextSame; + NextSame = 0; + M++; + return RC_OK; + } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) { + if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK)) + return rc; // Deferred reading failed + + if (Pretty >= 0) { + // Recover the memory used for parsing + PlugSubSet(G->Sarea, G->Sarea_Size); + + if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) { + Row = FindRow(g); + SameRow = 0; + Fpos++; + M = 1; + rc = RC_OK; + } else if (Pretty != 1 || strcmp(To_Line, "]")) { + strcpy(g->Message, G->Message); + rc = RC_FX; + } else + rc = RC_EF; - N++; + } else { + // Here we get a movable Json binary tree + PJSON jsp; + SWAP* swp; - if (NextSame) { - SameRow = NextSame; - NextSame = 0; - M++; - return RC_OK; - } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) { - if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK)) - // Deferred reading failed - return rc; - - // Recover the memory used for parsing - PlugSubSet(G->Sarea, G->Sarea_Size); - - if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) { - Row = FindRow(g); - SameRow = 0; - Fpos++; - M = 1; - rc = RC_OK; - } else if (Pretty != 1 || strcmp(To_Line, "]")) { - strcpy(g->Message, G->Message); - rc = RC_FX; - } else - rc = RC_EF; + jsp = (PJSON)To_Line; + swp = new(g) SWAP(G, jsp); + swp->SwapJson(jsp, false); // Restore pointers from offsets + Row = jsp; + Row = FindRow(g); + SameRow = 0; + Fpos++; + M = 1; + rc = RC_OK; + } // endif Pretty - } // endif ReadDB + } // endif ReadDB - return rc; - } // end of ReadDB + return rc; +} // end of ReadDB /***********************************************************************/ /* Make the top tree from the object path. */ @@ -1040,7 +1157,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) val->SetValue(objp); val = new(g) JVALUE; - objp->SetValue(g, val, objpath); + objp->SetKeyValue(g, val, objpath); } else { if (*objpath == '[') { // Old style @@ -1062,7 +1179,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) val = new(g) JVALUE; i = atoi(objpath) - B; - arp->SetValue(g, val, i); + arp->SetArrayValue(g, val, i); arp->InitArray(g); } // endif objpath @@ -1081,8 +1198,8 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) /***********************************************************************/ /* PrepareWriting: Prepare the line for WriteDB. 
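*/
// Editor's aside: ReadDB above re-parses one text line per row and, before
// each parse, rewinds the private work area G with PlugSubSet, so parse nodes
// are recycled instead of accumulating. A generic, hypothetical model of that
// bump-arena pattern:
#include <cstddef>

struct ArenaDemo {
  char  *base;
  size_t size, used;

  ArenaDemo(char *b, size_t s) : base(b), size(s), used(0) {}

  void *Alloc(size_t n)               // PlugSubAlloc, in spirit
  {
    if (used + n > size)
      return nullptr;                 // work area exhausted

    void *p = base + used;
    used += n;
    return p;
  } // end of Alloc

  void Reset(void) { used = 0; }      // PlugSubSet, in spirit: every node
                                      // allocated since is implicitly freed
}; // end of ArenaDemo
/* --- end of editor's aside; original comment box resumes ---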
*/ /***********************************************************************/ - bool TDBJSN::PrepareWriting(PGLOBAL g) - { +bool TDBJSN::PrepareWriting(PGLOBAL g) +{ PSZ s; if (MakeTopTree(g, Row)) @@ -1103,7 +1220,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) } else return true; - } // end of PrepareWriting +} // end of PrepareWriting /***********************************************************************/ /* WriteDB: Data Base write routine for JSON access method. */ @@ -1117,7 +1234,16 @@ int TDBJSN::WriteDB(PGLOBAL g) return rc; } // end of WriteDB -/* ---------------------------- JSONCOL ------------------------------ */ +/***********************************************************************/ +/* Data Base close routine for JSON access method. */ +/***********************************************************************/ +void TDBJSN::CloseDB(PGLOBAL g) +{ + TDBDOS::CloseDB(g); + G = PlugExit(G); +} // end of CloseDB + + /* ---------------------------- JSONCOL ------------------------------ */ /***********************************************************************/ /* JSONCOL public constructor. */ @@ -1125,7 +1251,7 @@ int TDBJSN::WriteDB(PGLOBAL g) JSONCOL::JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i) : DOSCOL(g, cdp, tdbp, cprec, i, "DOS") { - Tjp = (TDBJSN *)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp); + Tjp = (TDBJSN *)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp); G = Tjp->G; Jpath = cdp->GetFmt(); MulVal = NULL; @@ -1135,6 +1261,7 @@ JSONCOL::JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i) Xnod = -1; Xpd = false; Parsed = false; + Warned = false; } // end of JSONCOL constructor /***********************************************************************/ @@ -1153,13 +1280,14 @@ JSONCOL::JSONCOL(JSONCOL *col1, PTDB tdbp) : DOSCOL(col1, tdbp) Xnod = col1->Xnod; Xpd = col1->Xpd; Parsed = col1->Parsed; + Warned = col1->Warned; } // end of JSONCOL copy constructor /***********************************************************************/ /* SetBuffer: prepare a column block for write operation. */ /***********************************************************************/ bool JSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) - { +{ if (DOSCOL::SetBuffer(g, value, ok, check)) return true; @@ -1170,13 +1298,13 @@ bool JSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) Tjp = (TDBJSN*)To_Tdb; G = Tjp->G; return false; - } // end of SetBuffer +} // end of SetBuffer /***********************************************************************/ /* Check whether this object is expanded. */ /***********************************************************************/ bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) - { +{ if ((Tjp->Xcol && nm && !strcmp(nm, Tjp->Xcol) && (Tjp->Xval < 0 || Tjp->Xval == i)) || Xpd) { Xpd = true; // Expandable object @@ -1187,7 +1315,7 @@ bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) } // endif Xcol return false; - } // end of CheckExpand +} // end of CheckExpand /***********************************************************************/ /* Analyse array processing options. */ @@ -1487,7 +1615,14 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) { if (Value->IsTypeNum()) { strcpy(g->Message, "Cannot make Json for a numeric column"); + + if (!Warned) { + PushWarning(g, Tjp); + Warned = true; + } // endif Warned + Value->Reset(); +#if 0 } else if (Value->GetType() == TYPE_BIN) { if ((unsigned)Value->GetClen() >= sizeof(BSON)) { ulong len = Tjp->Lrecl ? 
Tjp->Lrecl : 500; @@ -1499,41 +1634,67 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) strcpy(g->Message, "Column size too small"); Value->SetValue_char(NULL, 0); } // endif Clen +#endif // 0 } else Value->SetValue_psz(Serialize(g, jsp, NULL, 0)); return Value; - } // end of MakeJson +} // end of MakeJson /***********************************************************************/ /* SetValue: Set a value from a JVALUE contains. */ /***********************************************************************/ -void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) - { - if (val) { +void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL jvp) +{ + if (jvp) { vp->SetNull(false); - switch (val->GetValType()) { + switch (jvp->GetValType()) { case TYPE_STRG: case TYPE_INTG: case TYPE_BINT: case TYPE_DBL: case TYPE_DTM: - vp->SetValue_pval(val->GetValue()); + switch (vp->GetType()) { + case TYPE_STRING: + case TYPE_DATE: + vp->SetValue_psz(jvp->GetString(g)); + break; + case TYPE_INT: + case TYPE_SHORT: + case TYPE_TINY: + vp->SetValue(jvp->GetInteger()); + break; + case TYPE_BIGINT: + vp->SetValue(jvp->GetBigint()); + break; + case TYPE_DOUBLE: + vp->SetValue(jvp->GetFloat()); + + if (jvp->GetValType() == TYPE_DBL) + vp->SetPrec(jvp->Nd); + + break; + default: + sprintf(g->Message, "Unsupported column type %d\n", vp->GetType()); + throw 888; + } // endswitch Type + break; case TYPE_BOOL: if (vp->IsTypeNum()) - vp->SetValue(val->GetInteger() ? 1 : 0); + vp->SetValue(jvp->GetInteger() ? 1 : 0); else - vp->SetValue_psz((PSZ)(val->GetInteger() ? "true" : "false")); + vp->SetValue_psz((PSZ)(jvp->GetInteger() ? "true" : "false")); break; case TYPE_JAR: - SetJsonValue(g, vp, val->GetArray()->GetValue(0), n); - break; +// SetJsonValue(g, vp, val->GetArray()->GetValue(0)); + vp->SetValue_psz(jvp->GetArray()->GetText(g, NULL)); + break; case TYPE_JOB: // if (!vp->IsTypeNum() || !Strict) { - vp->SetValue_psz(val->GetObject()->GetText(g, NULL)); + vp->SetValue_psz(jvp->GetObject()->GetText(g, NULL)); break; // } // endif Type @@ -1547,37 +1708,37 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) vp->SetNull(true); } // endif val - } // end of SetJsonValue +} // end of SetJsonValue /***********************************************************************/ /* ReadColumn: */ /***********************************************************************/ void JSONCOL::ReadColumn(PGLOBAL g) - { +{ if (!Tjp->SameRow || Xnod >= Tjp->SameRow) Value->SetValue_pval(GetColumnValue(g, Tjp->Row, 0)); - if (Xpd && Value->IsNull() && !((PJDEF)Tjp->To_Def)->Accept) - throw("Null expandable JSON value"); +// if (Xpd && Value->IsNull() && !((PJDEF)Tjp->To_Def)->Accept) +// throw("Null expandable JSON value"); // Set null when applicable if (!Nullable) Value->SetNull(false); - } // end of ReadColumn +} // end of ReadColumn /***********************************************************************/ /* GetColumnValue: */ /***********************************************************************/ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) - { +{ int n = Nod - 1; PJAR arp; PJVAL val = NULL; for (; i < Nod && row; i++) { if (Nodes[i].Op == OP_NUM) { - Value->SetValue(row->GetType() == TYPE_JAR ? row->size() : 1); + Value->SetValue(row->GetType() == TYPE_JAR ? 
((PJAR)row)->size() : 1); return(Value); } else if (Nodes[i].Op == OP_XX) { return MakeJson(G, row); @@ -1591,7 +1752,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) val = new(G) JVALUE(row); } else - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: @@ -1599,7 +1760,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ) - val = arp->GetValue(Nodes[i].Rank); + val = arp->GetArrayValue(Nodes[i].Rank); else if (Nodes[i].Op == OP_EXP) return ExpandArray(g, arp, i); else @@ -1607,7 +1768,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) } else { // Unexpected array, unwrap it as [0] - val = arp->GetValue(0); + val = arp->GetArrayValue(0); i--; } // endif's @@ -1625,15 +1786,15 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) } // endfor i - SetJsonValue(g, Value, val, n); + SetJsonValue(g, Value, val); return Value; - } // end of GetColumnValue +} // end of GetColumnValue /***********************************************************************/ /* ExpandArray: */ /***********************************************************************/ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) - { +{ int ars = MY_MIN(Tjp->Limit, arp->size()); PJVAL jvp; JVALUE jval; @@ -1645,13 +1806,13 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) return Value; } // endif ars - if (!(jvp = arp->GetValue((Nodes[n].Rx = Nodes[n].Nx)))) { + if (!(jvp = arp->GetArrayValue((Nodes[n].Rx = Nodes[n].Nx)))) { strcpy(g->Message, "Logical error expanding array"); throw 666; } // endif jvp if (n < Nod - 1 && jvp->GetJson()) { - jval.SetValue(GetColumnValue(g, jvp->GetJson(), n + 1)); + jval.SetValue(g, GetColumnValue(g, jvp->GetJson(), n + 1)); jvp = &jval; } // endif n @@ -1665,15 +1826,15 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) Tjp->NextSame = Xnod; } // endif NextSame - SetJsonValue(g, Value, jvp, n); + SetJsonValue(g, Value, jvp); return Value; - } // end of ExpandArray +} // end of ExpandArray /***********************************************************************/ /* CalculateArray: */ /***********************************************************************/ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) - { +{ int i, ars, nv = 0, nextsame = Tjp->NextSame; bool err; OPVAL op = Nodes[n].Op; @@ -1689,18 +1850,19 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) ars, op, nextsame); for (i = 0; i < ars; i++) { - jvrp = arp->GetValue(i); + jvrp = arp->GetArrayValue(i); if (trace(1)) htrc("i=%d nv=%d\n", i, nv); if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) do { if (jvrp->IsNull()) { - jvrp->Value = AllocateValue(g, GetJsonNull(), TYPE_STRING); + jvrp->Strp = PlugDup(g, GetJsonNull()); + jvrp->DataType = TYPE_STRG; jvp = jvrp; } else if (n < Nod - 1 && jvrp->GetJson()) { Tjp->NextSame = nextsame; - jval.SetValue(GetColumnValue(g, jvrp->GetJson(), n + 1)); + jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); jvp = &jval; } else jvp = jvrp; @@ -1710,10 +1872,10 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) jvp->GetString(g), jvp->IsNull() ? 
1 : 0); if (!nv++) { - SetJsonValue(g, vp, jvp, n); + SetJsonValue(g, vp, jvp); continue; } else - SetJsonValue(g, MulVal, jvp, n); + SetJsonValue(g, MulVal, jvp); if (!MulVal->IsNull()) { switch (op) { @@ -1768,19 +1930,19 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) Tjp->NextSame = nextsame; return vp; - } // end of CalculateArray +} // end of CalculateArray /***********************************************************************/ /* GetRow: Get the object containing this column. */ /***********************************************************************/ PJSON JSONCOL::GetRow(PGLOBAL g) - { +{ PJVAL val = NULL; PJAR arp; PJSON nwr, row = Tjp->Row; for (int i = 0; i < Nod && row; i++) { - if (Nodes[i+1].Op == OP_XX) + if (i < Nod-1 && Nodes[i+1].Op == OP_XX) break; else switch (row->GetType()) { case TYPE_JOB: @@ -1788,20 +1950,20 @@ PJSON JSONCOL::GetRow(PGLOBAL g) // Expected Array was not there, wrap the value continue; - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: arp = (PJAR)row; if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ) - val = arp->GetValue(Nodes[i].Rank); + val = arp->GetArrayValue(Nodes[i].Rank); else - val = arp->GetValue(Nodes[i].Rx); + val = arp->GetArrayValue(Nodes[i].Rx); } else { // Unexpected array, unwrap it as [0] - val = arp->GetValue(0); + val = arp->GetArrayValue(0); i--; } // endif Nodes @@ -1828,9 +1990,9 @@ PJSON JSONCOL::GetRow(PGLOBAL g) nwr = new(G) JOBJECT; if (row->GetType() == TYPE_JOB) { - ((PJOB)row)->SetValue(G, new(G) JVALUE(nwr), Nodes[i-1].Key); + ((PJOB)row)->SetKeyValue(G, new(G) JVALUE(nwr), Nodes[i-1].Key); } else if (row->GetType() == TYPE_JAR) { - ((PJAR)row)->AddValue(G, new(G) JVALUE(nwr)); + ((PJAR)row)->AddArrayValue(G, new(G) JVALUE(nwr)); ((PJAR)row)->InitArray(G); } else { strcpy(g->Message, "Wrong type when writing new row"); @@ -1846,13 +2008,13 @@ PJSON JSONCOL::GetRow(PGLOBAL g) } // endfor i return row; - } // end of GetRow +} // end of GetRow /***********************************************************************/ /* WriteColumn: */ /***********************************************************************/ void JSONCOL::WriteColumn(PGLOBAL g) - { +{ if (Xpd && Tjp->Pretty < 2) { strcpy(g->Message, "Cannot write expanded column when Pretty is not 2"); throw 666; @@ -1888,21 +2050,21 @@ void JSONCOL::WriteColumn(PGLOBAL g) if (Nodes[Nod-1].Op == OP_XX) { s = Value->GetCharValue(); - if (!(jsp = ParseJson(G, s, (int)strlen(s)))) { + if (!(jsp = ParseJson(G, s, strlen(s)))) { strcpy(g->Message, s); throw 666; } // endif jsp if (arp) { if (Nod > 1 && Nodes[Nod-2].Op == OP_EQ) - arp->SetValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Rank); + arp->SetArrayValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Rank); else - arp->AddValue(G, new(G) JVALUE(jsp)); + arp->AddArrayValue(G, new(G) JVALUE(jsp)); arp->InitArray(G); } else if (objp) { if (Nod > 1 && Nodes[Nod-2].Key) - objp->SetValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Key); + objp->SetKeyValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Key); } else if (jvp) jvp->SetValue(jsp); @@ -1919,24 +2081,24 @@ void JSONCOL::WriteColumn(PGLOBAL g) case TYPE_DOUBLE: if (arp) { if (Nodes[Nod-1].Op == OP_EQ) - arp->SetValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Rank); + arp->SetArrayValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Rank); else - arp->AddValue(G, new(G) JVALUE(G, Value)); + arp->AddArrayValue(G, new(G) JVALUE(G, Value)); arp->InitArray(G); } else if (objp) { if (Nodes[Nod-1].Key) - objp->SetValue(G, new(G) JVALUE(G, 
Value), Nodes[Nod-1].Key); + objp->SetKeyValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Key); } else if (jvp) - jvp->SetValue(Value); + jvp->SetValue(g, Value); break; default: // ?????????? sprintf(g->Message, "Invalid column type %d", Buf_Type); } // endswitch Type - } // end of WriteColumn +} // end of WriteColumn /* -------------------------- Class TDBJSON -------------------------- */ @@ -1944,23 +2106,23 @@ void JSONCOL::WriteColumn(PGLOBAL g) /* Implementation of the TDBJSON class. */ /***********************************************************************/ TDBJSON::TDBJSON(PJDEF tdp, PTXF txfp) : TDBJSN(tdp, txfp) - { +{ Doc = NULL; Multiple = tdp->Multiple; Done = Changed = false; - } // end of TDBJSON standard constructor +} // end of TDBJSON standard constructor TDBJSON::TDBJSON(PJTDB tdbp) : TDBJSN(tdbp) - { +{ Doc = tdbp->Doc; Multiple = tdbp->Multiple; Done = tdbp->Done; Changed = tdbp->Changed; - } // end of TDBJSON copy constructor +} // end of TDBJSON copy constructor // Used for update PTDB TDBJSON::Clone(PTABS t) - { +{ PTDB tp; PJCOL cp1, cp2; PGLOBAL g = t->G; @@ -1973,13 +2135,13 @@ PTDB TDBJSON::Clone(PTABS t) } // endfor cp1 return tp; - } // end of Clone +} // end of Clone /***********************************************************************/ /* Make the document tree from the object path. */ /***********************************************************************/ int TDBJSON::MakeNewDoc(PGLOBAL g) - { +{ // Create a void table that will be populated Doc = new(g) JARRAY; @@ -1988,15 +2150,16 @@ int TDBJSON::MakeNewDoc(PGLOBAL g) Done = true; return RC_OK; - } // end of MakeNewDoc +} // end of MakeNewDoc /***********************************************************************/ /* Make the document tree from a file. */ /***********************************************************************/ int TDBJSON::MakeDocument(PGLOBAL g) - { +{ char *p, *p1, *p2, *memory, *objpath, *key = NULL; - int len, i = 0; + int i = 0; + size_t len; my_bool a; MODE mode = Mode; PJSON jsp; @@ -2075,7 +2238,7 @@ int TDBJSON::MakeDocument(PGLOBAL g) key = p; objp = jsp->GetObject(); arp = NULL; - val = objp->GetValue(key); + val = objp->GetKeyValue(key); if (!val || !(jsp = val->GetJson())) { sprintf(g->Message, "Cannot find object key %s", key); @@ -2101,7 +2264,7 @@ int TDBJSON::MakeDocument(PGLOBAL g) arp = jsp->GetArray(); objp = NULL; i = atoi(p) - B; - val = arp->GetValue(i); + val = arp->GetArrayValue(i); if (!val) { sprintf(g->Message, "Cannot find array value %d", i); @@ -2122,17 +2285,17 @@ int TDBJSON::MakeDocument(PGLOBAL g) Doc = new(g) JARRAY; if (val) { - Doc->AddValue(g, val); + Doc->AddArrayValue(g, val); Doc->InitArray(g); } else if (jsp) { - Doc->AddValue(g, new(g) JVALUE(jsp)); + Doc->AddArrayValue(g, new(g) JVALUE(jsp)); Doc->InitArray(g); } // endif val if (objp) - objp->SetValue(g, new(g) JVALUE(Doc), key); + objp->SetKeyValue(g, new(g) JVALUE(Doc), key); else if (arp) - arp->SetValue(g, new(g) JVALUE(Doc), i); + arp->SetArrayValue(g, new(g) JVALUE(Doc), i); else Top = Doc; @@ -2140,13 +2303,13 @@ int TDBJSON::MakeDocument(PGLOBAL g) Done = true; return RC_OK; - } // end of MakeDocument +} // end of MakeDocument /***********************************************************************/ /* JSON Cardinality: returns table size in number of rows. */ /***********************************************************************/ int TDBJSON::Cardinality(PGLOBAL g) - { +{ if (!g) return (Xcol || Multiple) ? 
0 : 1; else if (Cardinal < 0) @@ -2159,48 +2322,48 @@ int TDBJSON::Cardinality(PGLOBAL g) return 10; } return Cardinal; - } // end of Cardinality +} // end of Cardinality /***********************************************************************/ /* JSON GetMaxSize: returns table size estimate in number of rows. */ /***********************************************************************/ int TDBJSON::GetMaxSize(PGLOBAL g) - { +{ if (MaxSize < 0) MaxSize = Cardinality(g) * ((Xcol) ? Limit : 1); return MaxSize; - } // end of GetMaxSize +} // end of GetMaxSize /***********************************************************************/ /* ResetSize: call by TDBMUL when calculating size estimate. */ /***********************************************************************/ void TDBJSON::ResetSize(void) - { +{ MaxSize = Cardinal = -1; Fpos = -1; N = 0; Done = false; - } // end of ResetSize +} // end of ResetSize /***********************************************************************/ /* TDBJSON is not indexable. */ /***********************************************************************/ int TDBJSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool) - { +{ if (pxdf) { strcpy(g->Message, "JSON not indexable when pretty = 2"); return RC_FX; } else return RC_OK; - } // end of MakeIndex +} // end of MakeIndex /***********************************************************************/ /* Return the position in the table. */ /***********************************************************************/ int TDBJSON::GetRecpos(void) - { +{ #if 0 union { uint Rpos; @@ -2212,13 +2375,13 @@ int TDBJSON::GetRecpos(void) return Rpos; #endif // 0 return Fpos; - } // end of GetRecpos +} // end of GetRecpos /***********************************************************************/ /* Set the position in the table. */ /***********************************************************************/ bool TDBJSON::SetRecpos(PGLOBAL, int recpos) - { +{ #if 0 union { uint Rpos; @@ -2239,13 +2402,13 @@ bool TDBJSON::SetRecpos(PGLOBAL, int recpos) Fpos = recpos - 1; return false; - } // end of SetRecpos +} // end of SetRecpos /***********************************************************************/ /* JSON Access Method opening routine. */ /***********************************************************************/ bool TDBJSON::OpenDB(PGLOBAL g) - { +{ if (Use == USE_OPEN) { /*******************************************************************/ /* Table already open replace it at its beginning. */ @@ -2277,13 +2440,13 @@ bool TDBJSON::OpenDB(PGLOBAL g) Use = USE_OPEN; return false; - } // end of OpenDB +} // end of OpenDB /***********************************************************************/ /* ReadDB: Data Base read routine for JSON access method. */ /***********************************************************************/ int TDBJSON::ReadDB(PGLOBAL) - { +{ int rc; N++; @@ -2294,61 +2457,61 @@ int TDBJSON::ReadDB(PGLOBAL) M++; rc = RC_OK; } else if (++Fpos < (signed)Doc->size()) { - Row = Doc->GetValue(Fpos); + Row = Doc->GetArrayValue(Fpos); if (Row->GetType() == TYPE_JVAL) Row = ((PJVAL)Row)->GetJson(); SameRow = 0; M = 1; - rc = RC_OK; + rc = RC_OK; } else rc = RC_EF; return rc; - } // end of ReadDB +} // end of ReadDB /***********************************************************************/ /* WriteDB: Data Base write routine for JSON access method. 
*/ /***********************************************************************/ int TDBJSON::WriteDB(PGLOBAL g) - { +{ if (Jmode == MODE_OBJECT) { PJVAL vp = new(g) JVALUE(Row); if (Mode == MODE_INSERT) { - Doc->AddValue(g, vp); + Doc->AddArrayValue(g, vp); Row = new(g) JOBJECT; - } else if (Doc->SetValue(g, vp, Fpos)) + } else if (Doc->SetArrayValue(g, vp, Fpos)) return RC_FX; } else if (Jmode == MODE_ARRAY) { PJVAL vp = new(g) JVALUE(Row); if (Mode == MODE_INSERT) { - Doc->AddValue(g, vp); + Doc->AddArrayValue(g, vp); Row = new(g) JARRAY; - } else if (Doc->SetValue(g, vp, Fpos)) + } else if (Doc->SetArrayValue(g, vp, Fpos)) return RC_FX; } else { // if (Jmode == MODE_VALUE) if (Mode == MODE_INSERT) { - Doc->AddValue(g, (PJVAL)Row); + Doc->AddArrayValue(g, (PJVAL)Row); Row = new(g) JVALUE; - } else if (Doc->SetValue(g, (PJVAL)Row, Fpos)) + } else if (Doc->SetArrayValue(g, (PJVAL)Row, Fpos)) return RC_FX; } // endif Jmode Changed = true; return RC_OK; - } // end of WriteDB +} // end of WriteDB /***********************************************************************/ /* Data Base delete line routine for JSON access method. */ /***********************************************************************/ int TDBJSON::DeleteDB(PGLOBAL g, int irc) - { +{ if (irc == RC_OK) { // Deleted current row if (Doc->DeleteValue(Fpos)) { @@ -2365,13 +2528,13 @@ int TDBJSON::DeleteDB(PGLOBAL g, int irc) } // endfor i return RC_OK; - } // end of DeleteDB +} // end of DeleteDB /***********************************************************************/ /* Data Base close routine for JSON access methods. */ /***********************************************************************/ void TDBJSON::CloseDB(PGLOBAL g) - { +{ if (!Changed) return; @@ -2387,7 +2550,7 @@ void TDBJSON::CloseDB(PGLOBAL g) if (!Serialize(g, Top, filename, Pretty)) puts(g->Message); - } // end of CloseDB +} // end of CloseDB /* ---------------------------TDBJCL class --------------------------- */ @@ -2395,18 +2558,18 @@ void TDBJSON::CloseDB(PGLOBAL g) /* TDBJCL class constructor. */ /***********************************************************************/ TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp) - { +{ Topt = tdp->GetTopt(); Db = tdp->Schema; Dsn = tdp->Uri; - } // end of TDBJCL constructor +} // end of TDBJCL constructor /***********************************************************************/ /* GetResult: Get the list the JSON file columns. */ /***********************************************************************/ PQRYRES TDBJCL::GetResult(PGLOBAL g) - { +{ return JSONColumns(g, Db, Dsn, Topt, false); - } // end of GetResult +} // end of GetResult /* --------------------------- End of json --------------------------- */ diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 88aa5e2ee8b..b47dc9b0665 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -5,6 +5,7 @@ /* */ /* This file contains the JSON classes declares. 
*/ /***********************************************************************/ +#pragma once //#include "osutil.h" // Unuseful and bad for OEM #include "block.h" #include "colblk.h" @@ -35,7 +36,7 @@ typedef struct _jncol { struct _jncol *Next; char *Name; char *Fmt; - int Type; + JTYP Type; int Len; int Scale; bool Cbn; @@ -58,7 +59,7 @@ public: // Members JCOL jcol; PJCL jcp, fjcp, pjcp; - PVAL valp; +//PVL vlp; PJDEF tdp; TDBJSN *tjnp; PJTDB tjsp; @@ -68,7 +69,7 @@ public: PCSZ sep; char colname[65], fmt[129], buf[16]; uint *length; - int i, n, bf, ncol, lvl, sz; + int i, n, bf, ncol, lvl, sz, limit; bool all, strfy; }; // end of JSONDISC @@ -126,6 +127,7 @@ public: class DllExport TDBJSN : public TDBDOS { friend class JSONCOL; friend class JSONDEF; + friend class JSONDISC; #if defined(CMGO_SUPPORT) friend class CMGFAM; #endif // CMGO_SUPPORT @@ -154,14 +156,18 @@ public: {return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;} // Database routines - virtual int Cardinality(PGLOBAL g); - virtual int GetMaxSize(PGLOBAL g); + //virtual int Cardinality(PGLOBAL g); + //virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); virtual bool PrepareWriting(PGLOBAL g); virtual int WriteDB(PGLOBAL g); + virtual void CloseDB(PGLOBAL g); - protected: + // Specific routine + virtual int EstimatedLength(void); + +protected: PJSON FindRow(PGLOBAL g); int MakeTopTree(PGLOBAL g, PJSON jsp); @@ -169,7 +175,7 @@ public: PGLOBAL G; // Support of parse memory PJSON Top; // The top JSON tree PJSON Row; // The current row - PJSON Val; // The value of the current row + PJVAL Val; // The value of the current row PJCOL Colp; // The multiple column JMODE Jmode; // MODE_OBJECT by default PCSZ Objname; // The table object name @@ -186,7 +192,8 @@ public: char Sep; // The Jpath separator bool Strict; // Strict syntax checking bool Comma; // Row has final comma - }; // end of class TDBJSN + bool Xpdable; // False: expandable columns are NULL +}; // end of class TDBJSN /* -------------------------- JSONCOL class -------------------------- */ @@ -224,8 +231,8 @@ public: PVAL ExpandArray(PGLOBAL g, PJAR arp, int n); PVAL CalculateArray(PGLOBAL g, PJAR arp, int n); PVAL MakeJson(PGLOBAL g, PJSON jsp); - void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n); - PJSON GetRow(PGLOBAL g); + void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val); + PJSON GetRow(PGLOBAL g); // Default constructor not to be used JSONCOL(void) {} @@ -241,7 +248,8 @@ public: char Sep; // The Jpath separator bool Xpd; // True for expandable column bool Parsed; // True when parsed - }; // end of class JSONCOL + bool Warned; // True when warning issued +}; // end of class JSONCOL /* -------------------------- TDBJSON class -------------------------- */ diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp index b1bdeffc880..1efda6e3bca 100644 --- a/storage/connect/tabrest.cpp +++ b/storage/connect/tabrest.cpp @@ -1,8 +1,11 @@ /************** tabrest C++ Program Source Code File (.CPP) ************/ -/* PROGRAM NAME: tabrest Version 1.7 */ -/* (C) Copyright to the author Olivier BERTRAND 2018 - 2019 */ +/* PROGRAM NAME: tabrest Version 1.8 */ +/* (C) Copyright to the author Olivier BERTRAND 2018 - 2020 */ /* This program is the REST Web API support for MariaDB. */ /* When compiled without MARIADB defined, it is the EOM module code. 
*/ +/* The way Connect handles NOSQL data returned by REST queries is */ +/* just by retrieving it as a file and then leave the existing data */ +/* type tables (JSON, XML or CSV) process it as usual. */ /***********************************************************************/ /***********************************************************************/ @@ -10,6 +13,8 @@ /***********************************************************************/ #if defined(MARIADB) #include <my_global.h> // All MariaDB stuff +#include <mysqld.h> +#include <sql_error.h> #else // !MARIADB OEM module #include "mini-global.h" #define _MAX_PATH 260 @@ -42,7 +47,19 @@ #include "tabfmt.h" #include "tabrest.h" +#if defined(connect_EXPORTS) +#define PUSH_WARNING(M) push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0, M) +#else +#define PUSH_WARNING(M) htrc(M) +#endif + +#if defined(__WIN__) || defined(_WINDOWS) +#define popen _popen +#define pclose _pclose +#endif + static XGETREST getRestFnc = NULL; +static int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename); #if !defined(MARIADB) /***********************************************************************/ @@ -72,7 +89,41 @@ PTABDEF __stdcall GetREST(PGLOBAL g, void *memp) #endif // !MARIADB /***********************************************************************/ -/* GetREST: get the external TABDEF from OEM module. */ +/* Xcurl: retrieve the REST answer by executing cURL. */ +/***********************************************************************/ +int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename) +{ + char buf[1024]; + int rc; + FILE *pipe; + + if (Uri) { + if (*Uri == '/' || Http[strlen(Http) - 1] == '/') + sprintf(buf, "curl %s%s -o %s", Http, Uri, filename); + else + sprintf(buf, "curl %s/%s -o %s", Http, Uri, filename); + + } else + sprintf(buf, "curl %s -o %s", Http, filename); + + if ((pipe = popen(buf, "rt"))) { + if (trace(515)) + while (fgets(buf, sizeof(buf), pipe)) { + htrc("%s", buf); + } // endwhile + + pclose(pipe); + rc = 0; + } else { + sprintf(g->Message, "curl failed, errno =%d", errno); + rc = 1; + } // endif pipe + + return rc; +} // end od Xcurl + +/***********************************************************************/ +/* GetREST: load the Rest lib and get the Rest function. */ /***********************************************************************/ XGETREST GetRestFunction(PGLOBAL g) { @@ -148,13 +199,15 @@ PQRYRES RESTColumns(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) #endif // !MARIADB { - PQRYRES qrp= NULL; - char filename[_MAX_PATH + 1]; // MAX PATH ??? - PCSZ http, uri, fn, ftype; + PQRYRES qrp= NULL; + char filename[_MAX_PATH + 1]; // MAX PATH ??? + int rc; + bool curl = false; + PCSZ http, uri, fn, ftype; XGETREST grf = GetRestFunction(g); if (!grf) - return NULL; + curl = true; http = GetStringTableOption(g, tp, "Http", NULL); uri = GetStringTableOption(g, tp, "Uri", NULL); @@ -178,17 +231,27 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) fn = filename; tp->filename = PlugDup(g, fn); + sprintf(g->Message, "No file name. 
Table will use %s", fn); + PUSH_WARNING(g->Message); } // endif fn // We used the file name relative to recorded datapath PlugSetPath(filename, fn, db); - //strcat(strcat(strcat(strcpy(filename, "."), slash), db), slash); - //strncat(filename, fn, _MAX_PATH - strlen(filename)); + curl = GetBooleanTableOption(g, tp, "Curl", curl); // Retrieve the file from the web and copy it locally - if (http && grf(g->Message, trace(515), http, uri, filename)) { - // sprintf(g->Message, "Failed to get file at %s", http); - } else if (!stricmp(ftype, "JSON")) + if (curl) + rc = Xcurl(g, http, uri, filename); + else if (grf) + rc = grf(g->Message, trace(515), http, uri, filename); + else { + strcpy(g->Message, "Cannot access to curl nor casablanca"); + rc = 1; + } // endif !grf + + if (rc) + return NULL; + else if (!stricmp(ftype, "JSON")) qrp = JSONColumns(g, db, NULL, tp, info); else if (!stricmp(ftype, "CSV")) qrp = CSVColumns(g, NULL, tp, info); @@ -209,14 +272,14 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) /***********************************************************************/ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { - char filename[_MAX_PATH + 1]; - int rc = 0, n; - bool xt = trace(515); - LPCSTR ftype; + char filename[_MAX_PATH + 1]; + int rc = 0, n; + bool curl = false, xt = trace(515); + LPCSTR ftype; XGETREST grf = GetRestFunction(g); if (!grf) - return true; + curl = true; #if defined(MARIADB) ftype = GetStringCatInfo(g, "Type", "JSON"); @@ -235,8 +298,8 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) : (!stricmp(ftype, "CSV")) ? 3 : 0; if (n == 0) { - htrc("DefineAM: Unsupported REST table type %s", am); - sprintf(g->Message, "Unsupported REST table type %s", am); + htrc("DefineAM: Unsupported REST table type %s\n", ftype); + sprintf(g->Message, "Unsupported REST table type %s", ftype); return true; } // endif n @@ -247,11 +310,19 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) // We used the file name relative to recorded datapath PlugSetPath(filename, Fn, GetPath()); - // Retrieve the file from the web and copy it locally - rc = grf(g->Message, xt, Http, Uri, filename); + curl = GetBoolCatInfo("Curl", curl); - if (xt) - htrc("Return from restGetFile: rc=%d\n", rc); + // Retrieve the file from the web and copy it locally + if (curl) { + rc = Xcurl(g, Http, Uri, filename); + xtrc(515, "Return from Xcurl: rc=%d\n", rc); + } else if (grf) { + rc = grf(g->Message, xt, Http, Uri, filename); + xtrc(515, "Return from restGetFile: rc=%d\n", rc); + } else { + strcpy(g->Message, "Cannot access to curl nor casablanca"); + rc = 1; + } // endif !grf if (rc) return true; diff --git a/storage/connect/tabrest.h b/storage/connect/tabrest.h index f08ac7984c9..9cf2d10a6b8 100644 --- a/storage/connect/tabrest.h +++ b/storage/connect/tabrest.h @@ -5,7 +5,10 @@ /***********************************************************************/ #pragma once -#ifndef __WIN__ +#if defined(__WIN__) +static PCSZ slash = "\\"; +#else // !__WIN__ +static PCSZ slash = "/"; #define stricmp strcasecmp #endif // !__WIN__ diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp index f5a105a530d..0a91f36afa7 100644 --- a/storage/connect/tabutil.cpp +++ b/storage/connect/tabutil.cpp @@ -708,7 +708,7 @@ bool PRXCOL::Init(PGLOBAL g, PTDB tp) MODE mode = To_Tdb->GetMode(); // Needed for MYSQL subtables - ((XCOLBLK*)Colp)->Name = Decode(g, Colp->GetName()); + ((COLBLK*)Colp)->SetName(Decode(g, Colp->GetName())); // May not have been done elsewhere 
Colp->InitValue(g); diff --git a/storage/connect/tabvir.cpp b/storage/connect/tabvir.cpp index 76d52e198e3..2fdb7f64744 100644 --- a/storage/connect/tabvir.cpp +++ b/storage/connect/tabvir.cpp @@ -168,17 +168,16 @@ int TDBVIR::TestFilter(PFIL filp, bool nop) } // endswitch op if (!nop) switch (op) { - case OP_LT: l1--; - /* falls through */ - case OP_LE: limit = l1; break; - default: ok = false; - } // endswitch op + case OP_LT: l1--; /* fall through */ + case OP_LE: limit = l1; break; + default: ok = false; + } // endswitch op + else switch (op) { - case OP_GE: l1--; - /* falls through */ - case OP_GT: limit = l1; break; - default: ok = false; - } // endswitch op + case OP_GE: l1--; /* fall through */ + case OP_GT: limit = l1; break; + default: ok = false; + } // endswitch op limit = MY_MIN(MY_MAX(0, limit), Size); diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc index c8f38b68015..5268651d080 100644 --- a/storage/connect/user_connect.cc +++ b/storage/connect/user_connect.cc @@ -112,8 +112,7 @@ bool user_connect::user_init() if (g) printf("%s\n", g->Message); - int rc __attribute__((unused))= PlugExit(g); - g= NULL; + g= PlugExit(g); if (dup) free(dup); diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index 5951b26e81e..412cb808936 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -197,7 +197,7 @@ const char *GetFormatType(int type) case TYPE_DOUBLE: c = "F"; break; case TYPE_DATE: c = "D"; break; case TYPE_TINY: c = "T"; break; - case TYPE_DECIM: c = "M"; break; + case TYPE_DECIM: c = "F"; break; case TYPE_BIN: c = "B"; break; case TYPE_PCHAR: c = "P"; break; } // endswitch type @@ -380,8 +380,8 @@ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec, case TYPE_STRING: valp = new(g) TYPVAL<PSZ>(g, (PSZ)NULL, len, prec); break; - case TYPE_DATE: - valp = new(g) DTVAL(g, len, prec, fmt); + case TYPE_DATE: + valp = new(g) DTVAL(g, len, prec, fmt); break; case TYPE_INT: if (uns) diff --git a/storage/connect/value.h b/storage/connect/value.h index ee7a1c8032f..df6a55501b6 100644 --- a/storage/connect/value.h +++ b/storage/connect/value.h @@ -65,7 +65,8 @@ DllExport BYTE OpBmp(PGLOBAL g, OPVAL opc); /***********************************************************************/ class DllExport VALUE : public BLOCK { friend class CONSTANT; // The only object allowed to use SetConstFormat - public: + friend class SWAP; // The only class allowed to access protected +public: // Constructors // Implementation @@ -260,7 +261,8 @@ class DllExport TYPVAL : public VALUE { /***********************************************************************/ template <> class DllExport TYPVAL<PSZ>: public VALUE { - public: + friend class SWAP; // The only class allowed to offsets Strg +public: // Constructors TYPVAL(PSZ s, short c = 0); TYPVAL(PGLOBAL g, PSZ s, int n, int c); @@ -346,7 +348,8 @@ class DllExport DECVAL: public TYPVAL<PSZ> { /* Specific BINARY class. 
*/ /***********************************************************************/ class DllExport BINVAL: public VALUE { - public: + friend class SWAP; // The only class allowed to offsets pointers +public: // Constructors //BINVAL(void *p); BINVAL(PGLOBAL g, void *p, int cl, int n); @@ -415,7 +418,8 @@ class DllExport DTVAL : public TYPVAL<int> { virtual bool SetValue_char(const char *p, int n); virtual void SetValue_psz(PCSZ s); virtual void SetValue_pvblk(PVBLK blk, int n); - virtual char *GetCharString(char *p); + virtual PSZ GetCharValue(void) { return Sdate; } + virtual char *GetCharString(char *p); virtual int ShowValue(char *buf, int len); virtual bool FormatValue(PVAL vp, PCSZ fmt); bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0); diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h index bc5912d3054..5b50e9320f5 100644 --- a/storage/connect/xobject.h +++ b/storage/connect/xobject.h @@ -130,6 +130,7 @@ class DllExport STRING : public BLOCK { inline void SetLength(uint n) {Length = n;} inline PSZ GetStr(void) {return Strp;} inline uint32 GetSize(void) {return Size;} + inline char GetLastChar(void) {return Length ? Strp[Length - 1] : 0;} inline bool IsTruncated(void) {return Trc;} // Methods diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index badb515c2b2..6e7ee48d2eb 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -528,7 +528,7 @@ int ha_tina::encode_quote(const uchar *buf) String attribute(attribute_buffer, sizeof(attribute_buffer), &my_charset_bin); bool ietf_quotes= table_share->option_struct->ietf_quotes; - my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); buffer.length(0); for (Field **field=table->field ; *field ; field++) @@ -606,7 +606,7 @@ int ha_tina::encode_quote(const uchar *buf) //buffer.replace(buffer.length(), 0, "\n", 1); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return (buffer.length()); } @@ -659,7 +659,6 @@ int ha_tina::find_current_row(uchar *buf) { my_off_t end_offset, curr_offset= current_position; int eoln_len; - my_bitmap_map *org_bitmap; int error; bool read_all; bool ietf_quotes= table_share->option_struct->ietf_quotes; @@ -679,7 +678,7 @@ int ha_tina::find_current_row(uchar *buf) /* We must read all columns in case a table is opened for update */ read_all= !bitmap_is_clear_all(table->write_set); /* Avoid asserts in ::store() for columns that are not going to be updated */ - org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set); error= HA_ERR_CRASHED_ON_USAGE; memset(buf, 0, table->s->null_bytes); @@ -857,7 +856,7 @@ int ha_tina::find_current_row(uchar *buf) error= 0; err: - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); DBUG_RETURN(error); } diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index ec34cf16858..00407730c31 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -936,7 +936,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record, { ulong *lengths; Field **field; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); DBUG_ENTER("ha_federated::convert_row_to_internal_format"); 
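/*
  Note on the recurring bitmap hunks (ha_tina above, federated/federatedx
  below): dbug_tmp_use_all_columns() and tmp_use_all_columns() now take the
  column map by address (&table->read_set or &table->write_set) and return
  the saved MY_BITMAP* instead of a raw my_bitmap_map*. A minimal sketch of
  the save/restore idiom under the new signatures; the loop body is
  illustrative only, not code from this patch:

    MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
    for (Field **field= table->field; *field; field++)
      (*field)->store(0LL, true);   // store() will not assert: all columns are in the map
    dbug_tmp_restore_column_map(&table->write_set, old_map);
*/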
lengths= mysql_fetch_lengths(result); @@ -965,7 +965,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record, } (*field)->move_field_offset(-old_ptr); } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(0); } @@ -1293,14 +1293,13 @@ bool ha_federated::create_where_from_key(String *to, char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE]; String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info); const key_range *ranges[2]= { start_key, end_key }; - my_bitmap_map *old_map; DBUG_ENTER("ha_federated::create_where_from_key"); tmp.length(0); if (start_key == NULL && end_key == NULL) DBUG_RETURN(1); - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); for (uint i= 0; i <= 1; i++) { bool needs_quotes; @@ -1477,7 +1476,7 @@ prepare_for_next_key_part: tmp.c_ptr_quick())); } } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); if (both_not_null) if (tmp.append(STRING_WITH_LEN(") "))) @@ -1492,7 +1491,7 @@ prepare_for_next_key_part: DBUG_RETURN(0); err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(1); } @@ -1841,7 +1840,7 @@ int ha_federated::write_row(const uchar *buf) String insert_field_value_string(insert_field_value_buffer, sizeof(insert_field_value_buffer), &my_charset_bin); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); DBUG_ENTER("ha_federated::write_row"); values_string.length(0); @@ -1895,7 +1894,7 @@ int ha_federated::write_row(const uchar *buf) values_string.append(STRING_WITH_LEN(", ")); } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); /* if there were no fields, we don't want to add a closing paren @@ -2203,7 +2202,7 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data) else { /* otherwise = */ - my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set); bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&field_value); if (needs_quote) @@ -2212,7 +2211,7 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data) if (needs_quote) update_string.append(value_quote_char); field_value.length(0); - tmp_restore_column_map(table->read_set, old_map); + tmp_restore_column_map(&table->read_set, old_map); } update_string.append(STRING_WITH_LEN(", ")); } diff --git a/storage/federatedx/federatedx_io_mysql.cc b/storage/federatedx/federatedx_io_mysql.cc index cc4d8ca7c70..f33cf45a241 100644 --- a/storage/federatedx/federatedx_io_mysql.cc +++ b/storage/federatedx/federatedx_io_mysql.cc @@ -64,7 +64,6 @@ struct mysql_position class federatedx_io_mysql :public federatedx_io { MYSQL mysql; /* MySQL connection */ - MYSQL_ROWS *current; DYNAMIC_ARRAY savepoints; bool requested_autocommit; bool actual_autocommit; @@ -108,7 +107,8 @@ public: virtual void free_result(FEDERATEDX_IO_RESULT *io_result); virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result); virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result); - virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result); + virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result, + FEDERATEDX_IO_ROWS 
**current= NULL); virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result); virtual const char *get_column_data(FEDERATEDX_IO_ROW *row, unsigned int column); @@ -117,7 +117,7 @@ public: virtual size_t get_ref_length() const; virtual void mark_position(FEDERATEDX_IO_RESULT *io_result, - void *ref); + void *ref, FEDERATEDX_IO_ROWS *current); virtual int seek_position(FEDERATEDX_IO_RESULT **io_result, const void *ref); virtual void set_thd(void *thd); @@ -517,10 +517,12 @@ my_ulonglong federatedx_io_mysql::get_num_rows(FEDERATEDX_IO_RESULT *io_result) } -FEDERATEDX_IO_ROW *federatedx_io_mysql::fetch_row(FEDERATEDX_IO_RESULT *io_result) +FEDERATEDX_IO_ROW *federatedx_io_mysql::fetch_row(FEDERATEDX_IO_RESULT *io_result, + FEDERATEDX_IO_ROWS **current) { MYSQL_RES *result= (MYSQL_RES*)io_result; - current= result->data_cursor; + if (current) + *current= (FEDERATEDX_IO_ROWS *) result->data_cursor; return (FEDERATEDX_IO_ROW *) mysql_fetch_row(result); } @@ -628,11 +630,11 @@ size_t federatedx_io_mysql::get_ref_length() const void federatedx_io_mysql::mark_position(FEDERATEDX_IO_RESULT *io_result, - void *ref) + void *ref, FEDERATEDX_IO_ROWS *current) { mysql_position& pos= *reinterpret_cast<mysql_position*>(ref); pos.result= (MYSQL_RES *) io_result; - pos.offset= current; + pos.offset= (MYSQL_ROW_OFFSET) current; } int federatedx_io_mysql::seek_position(FEDERATEDX_IO_RESULT **io_result, diff --git a/storage/federatedx/federatedx_io_null.cc b/storage/federatedx/federatedx_io_null.cc index 1976f22124a..b1058dbd2f5 100644 --- a/storage/federatedx/federatedx_io_null.cc +++ b/storage/federatedx/federatedx_io_null.cc @@ -90,7 +90,8 @@ public: virtual void free_result(FEDERATEDX_IO_RESULT *io_result); virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result); virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result); - virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result); + virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result, + FEDERATEDX_IO_ROWS **current= NULL); virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result); virtual const char *get_column_data(FEDERATEDX_IO_ROW *row, unsigned int column); @@ -98,7 +99,7 @@ public: unsigned int column) const; virtual size_t get_ref_length() const; virtual void mark_position(FEDERATEDX_IO_RESULT *io_result, - void *ref); + void *ref, FEDERATEDX_IO_ROWS *current); virtual int seek_position(FEDERATEDX_IO_RESULT **io_result, const void *ref); }; @@ -242,7 +243,8 @@ my_ulonglong federatedx_io_null::get_num_rows(FEDERATEDX_IO_RESULT *) } -FEDERATEDX_IO_ROW *federatedx_io_null::fetch_row(FEDERATEDX_IO_RESULT *) +FEDERATEDX_IO_ROW *federatedx_io_null::fetch_row(FEDERATEDX_IO_RESULT *, + FEDERATEDX_IO_ROWS **current) { return NULL; } @@ -288,7 +290,7 @@ size_t federatedx_io_null::get_ref_length() const void federatedx_io_null::mark_position(FEDERATEDX_IO_RESULT *io_result, - void *ref) + void *ref, FEDERATEDX_IO_ROWS *current) { } diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index 3c2b4cdc25b..19b56980714 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -871,7 +871,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record, ulong *lengths; Field **field; int column= 0; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); Time_zone *saved_time_zone= table->in_use->variables.time_zone; 
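/*
  Note on the cursor-position hunks above and below: fetch_row() gains an
  optional FEDERATEDX_IO_ROWS **current out-parameter, and mark_position()
  now receives that pointer explicitly, so the row cursor lives in
  ha_federatedx (the new "current" member) rather than being cached inside
  federatedx_io_mysql. A condensed sketch of the calling pattern, pieced
  together from the read_next()/position() hunks below (simplified, not
  verbatim):

    FEDERATEDX_IO_ROWS *current;                      // member of ha_federatedx
    FEDERATEDX_IO_ROW *row= io->fetch_row(stored_result, &current);
    if (row)                                          // later, in position():
      io->mark_position(stored_result, ref, current);
*/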
DBUG_ENTER("ha_federatedx::convert_row_to_internal_format"); @@ -900,7 +900,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record, (*field)->move_field_offset(-old_ptr); } table->in_use->variables.time_zone= saved_time_zone; - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(0); } @@ -1229,7 +1229,6 @@ bool ha_federatedx::create_where_from_key(String *to, String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info); const key_range *ranges[2]= { start_key, end_key }; Time_zone *saved_time_zone= table->in_use->variables.time_zone; - my_bitmap_map *old_map; DBUG_ENTER("ha_federatedx::create_where_from_key"); tmp.length(0); @@ -1237,7 +1236,7 @@ bool ha_federatedx::create_where_from_key(String *to, DBUG_RETURN(1); table->in_use->variables.time_zone= UTC; - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); for (uint i= 0; i <= 1; i++) { bool needs_quotes; @@ -1413,7 +1412,7 @@ prepare_for_next_key_part: tmp.c_ptr_quick())); } } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); table->in_use->variables.time_zone= saved_time_zone; if (both_not_null) @@ -1429,7 +1428,7 @@ prepare_for_next_key_part: DBUG_RETURN(0); err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); table->in_use->variables.time_zone= saved_time_zone; DBUG_RETURN(1); } @@ -2004,7 +2003,7 @@ int ha_federatedx::write_row(const uchar *buf) sizeof(insert_field_value_buffer), &my_charset_bin); Time_zone *saved_time_zone= table->in_use->variables.time_zone; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); DBUG_ENTER("ha_federatedx::write_row"); table->in_use->variables.time_zone= UTC; @@ -2059,7 +2058,7 @@ int ha_federatedx::write_row(const uchar *buf) values_string.append(STRING_WITH_LEN(", ")); } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); table->in_use->variables.time_zone= saved_time_zone; /* @@ -2384,7 +2383,7 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data) else { /* otherwise = */ - my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set); bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&field_value); if (needs_quote) @@ -2393,7 +2392,7 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data) if (needs_quote) update_string.append(value_quote_char); field_value.length(0); - tmp_restore_column_map(table->read_set, old_map); + tmp_restore_column_map(&table->read_set, old_map); } update_string.append(STRING_WITH_LEN(", ")); } @@ -2942,7 +2941,7 @@ int ha_federatedx::read_next(uchar *buf, FEDERATEDX_IO_RESULT *result) DBUG_RETURN(retval); /* Fetch a row, insert it back in a row format. 
*/ - if (!(row= io->fetch_row(result))) + if (!(row= io->fetch_row(result, &current))) DBUG_RETURN(HA_ERR_END_OF_FILE); if (!(retval= convert_row_to_internal_format(buf, row, result))) @@ -2986,7 +2985,7 @@ void ha_federatedx::position(const uchar *record __attribute__ ((unused))) if (txn->acquire(share, ha_thd(), TRUE, &io)) DBUG_VOID_RETURN; - io->mark_position(stored_result, ref); + io->mark_position(stored_result, ref, current); position_called= TRUE; @@ -3420,7 +3419,9 @@ int ha_federatedx::create(const char *name, TABLE *table_arg, { FEDERATEDX_SERVER server; - fill_server(thd->mem_root, &server, &tmp_share, create_info->table_charset); + // It's possibly wrong to use alter_table_convert_to_charset here. + fill_server(thd->mem_root, &server, &tmp_share, + create_info->alter_table_convert_to_charset); #ifndef DBUG_OFF mysql_mutex_init(fe_key_mutex_FEDERATEDX_SERVER_mutex, diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h index 1870a83d13d..7b6504db93d 100644 --- a/storage/federatedx/ha_federatedx.h +++ b/storage/federatedx/ha_federatedx.h @@ -131,6 +131,7 @@ typedef struct st_federatedx_share { typedef struct st_federatedx_result FEDERATEDX_IO_RESULT; typedef struct st_federatedx_row FEDERATEDX_IO_ROW; +typedef struct st_federatedx_rows FEDERATEDX_IO_ROWS; typedef ptrdiff_t FEDERATEDX_IO_OFFSET; class federatedx_io @@ -207,7 +208,8 @@ public: virtual void free_result(FEDERATEDX_IO_RESULT *io_result)=0; virtual unsigned int get_num_fields(FEDERATEDX_IO_RESULT *io_result)=0; virtual my_ulonglong get_num_rows(FEDERATEDX_IO_RESULT *io_result)=0; - virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result)=0; + virtual FEDERATEDX_IO_ROW *fetch_row(FEDERATEDX_IO_RESULT *io_result, + FEDERATEDX_IO_ROWS **current= NULL)=0; virtual ulong *fetch_lengths(FEDERATEDX_IO_RESULT *io_result)=0; virtual const char *get_column_data(FEDERATEDX_IO_ROW *row, unsigned int column)=0; @@ -216,7 +218,7 @@ public: virtual size_t get_ref_length() const=0; virtual void mark_position(FEDERATEDX_IO_RESULT *io_result, - void *ref)=0; + void *ref, FEDERATEDX_IO_ROWS *current)=0; virtual int seek_position(FEDERATEDX_IO_RESULT **io_result, const void *ref)=0; virtual void set_thd(void *thd) { } @@ -269,6 +271,7 @@ class ha_federatedx: public handler federatedx_txn *txn; federatedx_io *io; FEDERATEDX_IO_RESULT *stored_result; + FEDERATEDX_IO_ROWS *current; /** Array of all stored results we get during a query execution. */ diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index ff5bb4b3ec4..afbb929c372 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -363,9 +363,6 @@ int ha_heap::info(uint flag) { HEAPINFO hp_info; - if (!table) - return 0; - (void) heap_info(file,&hp_info,flag); errkey= hp_info.errkey; diff --git a/storage/innobase/.clang-format b/storage/innobase/.clang-format-old index 54f7b47bc88..54f7b47bc88 100644 --- a/storage/innobase/.clang-format +++ b/storage/innobase/.clang-format-old diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index 8ee7d167805..aca02ea3998 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -898,7 +898,7 @@ btr_page_get_father_node_ptr_func( node_ptr = btr_cur_get_rec(cursor); - offsets = rec_get_offsets(node_ptr, index, offsets, false, + offsets = rec_get_offsets(node_ptr, index, offsets, 0, ULINT_UNDEFINED, &heap); if (btr_node_ptr_get_child_page_no(node_ptr, offsets) != page_no) { @@ -915,10 +915,11 @@ btr_page_get_father_node_ptr_func( print_rec = page_rec_get_next( page_get_infimum_rec(page_align(user_rec))); offsets = rec_get_offsets(print_rec, index, offsets, - page_rec_is_leaf(user_rec), + page_rec_is_leaf(user_rec) + ? index->n_core_fields : 0, ULINT_UNDEFINED, &heap); page_rec_print(print_rec, offsets); - offsets = rec_get_offsets(node_ptr, index, offsets, false, + offsets = rec_get_offsets(node_ptr, index, offsets, 0, ULINT_UNDEFINED, &heap); page_rec_print(node_ptr, offsets); @@ -2284,7 +2285,9 @@ btr_page_get_split_rec( incl_data += insert_size; } else { offsets = rec_get_offsets(rec, cursor->index, offsets, - page_is_leaf(page), + page_is_leaf(page) + ? cursor->index->n_core_fields + : 0, ULINT_UNDEFINED, &heap); incl_data += rec_offs_size(offsets); } @@ -2393,7 +2396,9 @@ btr_page_insert_fits( space after rec is removed from page. */ *offsets = rec_get_offsets(rec, cursor->index, *offsets, - page_is_leaf(page), + page_is_leaf(page) + ? cursor->index->n_core_fields + : 0, ULINT_UNDEFINED, heap); total_data -= rec_offs_size(*offsets); @@ -2680,7 +2685,8 @@ btr_page_tuple_smaller( first_rec = page_cur_get_rec(&pcur); *offsets = rec_get_offsets( - first_rec, cursor->index, *offsets, page_is_leaf(block->frame), + first_rec, cursor->index, *offsets, + page_is_leaf(block->frame) ? cursor->index->n_core_fields : 0, n_uniq, heap); return(cmp_dtuple_rec(tuple, first_rec, *offsets) < 0); @@ -2964,7 +2970,9 @@ func_start: first_rec = move_limit = split_rec; *offsets = rec_get_offsets(split_rec, cursor->index, *offsets, - page_is_leaf(page), n_uniq, heap); + page_is_leaf(page) + ? cursor->index->n_core_fields : 0, + n_uniq, heap); insert_left = !tuple || cmp_dtuple_rec(tuple, split_rec, *offsets) < 0; @@ -3730,7 +3738,7 @@ retry: rec_offs* offsets2 = NULL; /* For rtree, we need to update father's mbr. */ - if (dict_index_is_spatial(index)) { + if (index->is_spatial()) { /* We only support merge pages with the same parent page */ if (!rtr_check_same_block( @@ -3748,7 +3756,8 @@ retry: offsets2 = rec_get_offsets( btr_cur_get_rec(&cursor2), index, NULL, - page_is_leaf(cursor2.page_cur.block->frame), + page_is_leaf(cursor2.page_cur.block->frame) + ? index->n_fields : 0, ULINT_UNDEFINED, &heap); /* Check if parent entry needs to be updated */ @@ -3922,13 +3931,14 @@ retry: #endif /* UNIV_DEBUG */ /* For rtree, we need to update father's mbr. */ - if (dict_index_is_spatial(index)) { + if (index->is_spatial()) { rec_offs* offsets2; ulint rec_info; offsets2 = rec_get_offsets( btr_cur_get_rec(&cursor2), index, NULL, - page_is_leaf(cursor2.page_cur.block->frame), + page_is_leaf(cursor2.page_cur.block->frame) + ? index->n_fields : 0, ULINT_UNDEFINED, &heap); ut_ad(btr_node_ptr_get_child_page_no( @@ -4094,12 +4104,13 @@ btr_discard_only_page_on_level( mtr_t* mtr) /*!< in: mtr */ { ulint page_level = 0; - trx_id_t max_trx_id; ut_ad(!index->is_dummy); /* Save the PAGE_MAX_TRX_ID from the leaf page. 
*/ - max_trx_id = page_get_max_trx_id(buf_block_get_frame(block)); + const trx_id_t max_trx_id = page_get_max_trx_id(block->frame); + const rec_t* r = page_rec_get_next(page_get_infimum_rec(block->frame)); + ut_ad(rec_is_metadata(r, *index) == index->is_instant()); while (block->page.id.page_no() != dict_index_get_page(index)) { btr_cur_t cursor; @@ -4150,16 +4161,14 @@ btr_discard_only_page_on_level( } #endif /* UNIV_BTR_DEBUG */ - mem_heap_t* heap = NULL; - const rec_t* rec = NULL; - rec_offs* offsets = NULL; + mem_heap_t* heap = nullptr; + const rec_t* rec = nullptr; + rec_offs* offsets = nullptr; if (index->table->instant) { - const rec_t* r = page_rec_get_next(page_get_infimum_rec( - block->frame)); - ut_ad(rec_is_metadata(r, *index) == index->is_instant()); if (rec_is_alter_metadata(r, *index)) { heap = mem_heap_create(srv_page_size); - offsets = rec_get_offsets(r, index, NULL, true, + offsets = rec_get_offsets(r, index, nullptr, + index->n_core_fields, ULINT_UNDEFINED, &heap); rec = rec_copy(mem_heap_alloc(heap, rec_offs_size(offsets)), @@ -4433,7 +4442,7 @@ btr_print_recursive( node_ptr = page_cur_get_rec(&cursor); *offsets = rec_get_offsets( - node_ptr, index, *offsets, false, + node_ptr, index, *offsets, 0, ULINT_UNDEFINED, heap); btr_print_recursive(index, btr_node_ptr_get_child(node_ptr, @@ -4582,7 +4591,9 @@ btr_index_rec_validate( page = page_align(rec); - if (dict_index_is_ibuf(index)) { + ut_ad(index->n_core_fields); + + if (index->is_ibuf()) { /* The insert buffer index tree can contain records from any other index: we cannot check the number of fields or their length */ @@ -4646,7 +4657,8 @@ n_field_mismatch: } } - offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page), + offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page) + ? index->n_core_fields : 0, ULINT_UNDEFINED, &heap); const dict_field_t* field = index->fields; ut_ad(rec_offs_n_fields(offsets) @@ -4668,6 +4680,16 @@ n_field_mismatch: } else { fixed_size = dict_col_get_fixed_size( field->col, page_is_comp(page)); + if (rec_offs_nth_extern(offsets, i)) { + const byte* data = rec_get_nth_field( + rec, offsets, i, &len); + len -= BTR_EXTERN_FIELD_REF_SIZE; + ulint extern_len = mach_read_from_4( + data + len + BTR_EXTERN_LEN + 4); + if (fixed_size == extern_len) { + goto next_field; + } + } } /* Note that if fixed_size != 0, it equals the @@ -4700,7 +4722,7 @@ len_mismatch: } return(FALSE); } - +next_field: field++; } @@ -4893,7 +4915,7 @@ btr_validate_level( page_cur_move_to_next(&cursor); node_ptr = page_cur_get_rec(&cursor); - offsets = rec_get_offsets(node_ptr, index, offsets, false, + offsets = rec_get_offsets(node_ptr, index, offsets, 0, ULINT_UNDEFINED, &heap); savepoint2 = mtr_set_savepoint(&mtr); @@ -5017,10 +5039,12 @@ loop: right_rec = page_rec_get_next(page_get_infimum_rec( right_page)); offsets = rec_get_offsets(rec, index, offsets, - page_is_leaf(page), + page_is_leaf(page) + ? index->n_core_fields : 0, ULINT_UNDEFINED, &heap); offsets2 = rec_get_offsets(right_rec, index, offsets2, - page_is_leaf(right_page), + page_is_leaf(right_page) + ? 
index->n_core_fields : 0, ULINT_UNDEFINED, &heap); /* For spatial index, we cannot guarantee the key ordering diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc index 51c91c5b037..65cb6e83783 100644 --- a/storage/innobase/btr/btr0bulk.cc +++ b/storage/innobase/btr/btr0bulk.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2019, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -193,7 +193,8 @@ PageBulk::insert( if (!page_rec_is_infimum_low(page_offset(m_cur_rec))) { rec_t* old_rec = m_cur_rec; rec_offs* old_offsets = rec_get_offsets( - old_rec, m_index, NULL, is_leaf, + old_rec, m_index, NULL, is_leaf + ? m_index->n_core_fields : 0, ULINT_UNDEFINED, &m_heap); ut_ad(cmp_rec_rec(rec, old_rec, offsets, old_offsets, m_index) @@ -447,6 +448,7 @@ PageBulk::getSplitRec() ut_ad(m_page_zip != NULL); ut_ad(m_rec_no >= 2); + ut_ad(!m_index->is_instant()); ut_ad(page_get_free_space_of_empty(m_is_comp) > m_free_space); total_used_size = page_get_free_space_of_empty(m_is_comp) @@ -456,13 +458,13 @@ PageBulk::getSplitRec() n_recs = 0; offsets = NULL; rec = page_get_infimum_rec(m_page); + const ulint n_core = page_is_leaf(m_page) ? m_index->n_core_fields : 0; do { rec = page_rec_get_next(rec); ut_ad(page_rec_is_user_rec(rec)); - offsets = rec_get_offsets(rec, m_index, offsets, - page_is_leaf(m_page), + offsets = rec_get_offsets(rec, m_index, offsets, n_core, ULINT_UNDEFINED, &m_heap); total_recs_size += rec_offs_size(offsets); n_recs++; @@ -491,9 +493,11 @@ PageBulk::copyIn( ut_ad(m_rec_no == 0); ut_ad(page_rec_is_user_rec(rec)); + const ulint n_core = page_rec_is_leaf(rec) + ? m_index->n_core_fields : 0; + do { - offsets = rec_get_offsets(rec, m_index, offsets, - page_rec_is_leaf(split_rec), + offsets = rec_get_offsets(rec, m_index, offsets, n_core, ULINT_UNDEFINED, &m_heap); insert(rec, offsets); @@ -534,8 +538,10 @@ PageBulk::copyOut( /* Set last record's next in page */ rec_offs* offsets = NULL; rec = page_rec_get_prev(split_rec); - offsets = rec_get_offsets(rec, m_index, offsets, - page_rec_is_leaf(split_rec), + const ulint n_core = page_rec_is_leaf(split_rec) + ? m_index->n_core_fields : 0; + + offsets = rec_get_offsets(rec, m_index, offsets, n_core, ULINT_UNDEFINED, &m_heap); page_rec_set_next(rec, page_get_supremum_rec(m_page)); @@ -543,8 +549,7 @@ PageBulk::copyOut( m_cur_rec = rec; m_heap_top = rec_get_end(rec, offsets); - offsets = rec_get_offsets(last_rec, m_index, offsets, - page_rec_is_leaf(split_rec), + offsets = rec_get_offsets(last_rec, m_index, offsets, n_core, ULINT_UNDEFINED, &m_heap); m_free_space += ulint(rec_get_end(last_rec, offsets) - m_heap_top) @@ -976,7 +981,8 @@ BtrBulk::insert( /* Convert tuple to rec. */ rec = rec_convert_dtuple_to_rec(static_cast<byte*>(mem_heap_alloc( page_bulk->m_heap, rec_size)), m_index, tuple, n_ext); - offsets = rec_get_offsets(rec, m_index, offsets, !level, + offsets = rec_get_offsets(rec, m_index, offsets, level + ? 
0 : m_index->n_core_fields, ULINT_UNDEFINED, &page_bulk->m_heap); page_bulk->insert(rec, offsets); diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 59c6d06d5af..aeb5e2aaa9c 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -3,7 +3,7 @@ Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -67,6 +67,9 @@ Created 10/16/1994 Heikki Tuuri #include "srv0start.h" #include "mysql_com.h" #include "dict0stats.h" +#ifdef WITH_WSREP +#include "mysql/service_wsrep.h" +#endif /* WITH_WSREP */ /** Buffered B-tree operation types, introduced as part of delete buffering. */ enum btr_op_t { @@ -592,7 +595,8 @@ incompatible: } mem_heap_t* heap = NULL; - rec_offs* offsets = rec_get_offsets(rec, index, NULL, true, + rec_offs* offsets = rec_get_offsets(rec, index, NULL, + index->n_core_fields, ULINT_UNDEFINED, &heap); if (rec_offs_any_default(offsets)) { inconsistent: @@ -1189,7 +1193,6 @@ static ulint btr_node_ptr_max_size(const dict_index_t* index) TABLE_STATS_NAME) || !strcmp(index->table->name.m_name, INDEX_STATS_NAME))) { - ut_ad(!strcmp(field->name, "table_name")); /* Interpret "table_name" as VARCHAR(199) even if it was incorrectly defined as VARCHAR(64). While the caller of ha_innobase enforces the @@ -2047,7 +2050,7 @@ retry_page_get: node_ptr = page_cur_get_rec(page_cursor); - offsets = rec_get_offsets(node_ptr, index, offsets, false, + offsets = rec_get_offsets(node_ptr, index, offsets, 0, ULINT_UNDEFINED, &heap); /* If the rec is the first or last in the page for @@ -2178,7 +2181,7 @@ need_opposite_intention: offsets2 = rec_get_offsets( first_rec, index, offsets2, - false, ULINT_UNDEFINED, &heap); + 0, ULINT_UNDEFINED, &heap); cmp_rec_rec(node_ptr, first_rec, offsets, offsets2, index, false, &matched_fields); @@ -2196,7 +2199,7 @@ need_opposite_intention: offsets2 = rec_get_offsets( last_rec, index, offsets2, - false, ULINT_UNDEFINED, &heap); + 0, ULINT_UNDEFINED, &heap); cmp_rec_rec( node_ptr, last_rec, offsets, offsets2, index, @@ -2365,7 +2368,7 @@ need_opposite_intention: offsets = rec_get_offsets( my_node_ptr, index, offsets, - false, ULINT_UNDEFINED, &heap); + 0, ULINT_UNDEFINED, &heap); ulint my_page_no = btr_node_ptr_get_child_page_no( @@ -2818,7 +2821,7 @@ btr_cur_open_at_index_side_func( node_ptr = page_cur_get_rec(page_cursor); offsets = rec_get_offsets(node_ptr, cursor->index, offsets, - false, ULINT_UNDEFINED, &heap); + 0, ULINT_UNDEFINED, &heap); /* If the rec is the first or last in the page for pessimistic delete intention, it might cause node_ptr insert @@ -3113,7 +3116,7 @@ btr_cur_open_at_rnd_pos_func( node_ptr = page_cur_get_rec(page_cursor); offsets = rec_get_offsets(node_ptr, cursor->index, offsets, - false, ULINT_UNDEFINED, &heap); + 0, ULINT_UNDEFINED, &heap); /* If the rec is the first or last in the page for pessimistic delete intention, it might cause node_ptr insert @@ -3288,7 +3291,8 @@ btr_cur_ins_lock_and_undo( /* Check if there is predicate or GAP lock preventing the insertion */ if (!(flags & BTR_NO_LOCKING_FLAG)) { - if (dict_index_is_spatial(index)) { + const unsigned type = index->type; + if (UNIV_UNLIKELY(type & DICT_SPATIAL)) { lock_prdt_t prdt; rtr_mbr_t 
mbr; @@ -3305,9 +3309,30 @@ btr_cur_ins_lock_and_undo( index, thr, mtr, &prdt); *inherit = false; } else { +#ifdef WITH_WSREP + trx_t* trx= thr_get_trx(thr); + /* If the transaction scanning a unique secondary + key is a wsrep high-priority (brute force) + thread, the scan may involve GAP-locking in + the index. Because such locking also happens + when replication events are applied in high + priority applier threads, lock conflicts can + arise between two wsrep high-priority threads. + To avoid this GAP-locking, we mark here that + the transaction is performing a unique key + scan. */ + if ((type & (DICT_CLUSTERED | DICT_UNIQUE)) == DICT_UNIQUE + && trx->is_wsrep() + && wsrep_thd_is_BF(trx->mysql_thd, false)) { + trx->wsrep_UK_scan= true; + } +#endif /* WITH_WSREP */ err = lock_rec_insert_check_and_lock( flags, rec, btr_cur_get_block(cursor), index, thr, mtr, inherit); +#ifdef WITH_WSREP + trx->wsrep_UK_scan= false; +#endif /* WITH_WSREP */ } } @@ -3573,7 +3598,8 @@ fail_err: ut_ad(thr->graph->trx->id == trx_read_trx_id( static_cast<const byte*>( - trx_id->data))); + trx_id->data)) + || index->table->is_temporary()); } } #endif @@ -4093,7 +4119,8 @@ btr_cur_parse_update_in_place( flags != (BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG | BTR_KEEP_SYS_FLAG) - || page_is_leaf(page), + || page_is_leaf(page) + ? index->n_core_fields : 0, ULINT_UNDEFINED, &heap); if (!(flags & BTR_KEEP_SYS_FLAG)) { @@ -4234,7 +4261,8 @@ btr_cur_update_in_place( index = cursor->index; ut_ad(rec_offs_validate(rec, index, offsets)); ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table)); - ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)); + ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG) + || index->table->is_temporary()); /* The insert buffer tree should never be updated in place. */ ut_ad(!dict_index_is_ibuf(index)); ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG) @@ -4535,7 +4563,8 @@ btr_cur_optimistic_update( page = buf_block_get_frame(block); rec = btr_cur_get_rec(cursor); index = cursor->index; - ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG)); + ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG) + || index->table->is_temporary()); ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table)); ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); /* This is intended only for leaf page updates */ ut_ad(fil_page_index_page_check(page)); ut_ad(btr_page_get_index_id(page) == index->id); - *offsets = rec_get_offsets(rec, index, *offsets, true, + *offsets = rec_get_offsets(rec, index, *offsets, index->n_core_fields, ULINT_UNDEFINED, heap); #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(rec, *offsets) @@ -4892,8 +4921,8 @@ btr_cur_pessimistic_update( ut_ad(!page_zip || !index->table->is_temporary()); /* The insert buffer tree should never be updated in place.
*/ ut_ad(!dict_index_is_ibuf(index)); - ut_ad(trx_id > 0 - || (flags & BTR_KEEP_SYS_FLAG)); + ut_ad(trx_id > 0 || (flags & BTR_KEEP_SYS_FLAG) + || index->table->is_temporary()); ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG) || dict_index_is_clust(index)); ut_ad(thr_get_trx(thr)->id == trx_id @@ -5409,7 +5438,8 @@ btr_cur_parse_del_mark_set_clust_rec( if (!(flags & BTR_KEEP_SYS_FLAG)) { row_upd_rec_sys_fields_in_recovery( rec, page_zip, - rec_get_offsets(rec, index, offsets, true, + rec_get_offsets(rec, index, offsets, + index->n_core_fields, pos + 2, &heap), pos, trx_id, roll_ptr); } else { @@ -5418,7 +5448,8 @@ btr_cur_parse_del_mark_set_clust_rec( ut_ad(memcmp(rec_get_nth_field( rec, rec_get_offsets(rec, index, - offsets, true, + offsets, index + ->n_core_fields, pos, &heap), pos, &offset), field_ref_zero, DATA_TRX_ID_LEN)); @@ -5753,7 +5784,8 @@ btr_cur_optimistic_delete_func( rec = btr_cur_get_rec(cursor); - offsets = rec_get_offsets(rec, cursor->index, offsets, true, + offsets = rec_get_offsets(rec, cursor->index, offsets, + cursor->index->n_core_fields, ULINT_UNDEFINED, &heap); const ibool no_compress_needed = !rec_offs_any_extern(offsets) @@ -5961,7 +5993,8 @@ btr_cur_pessimistic_delete( ut_a(!page_zip || page_zip_validate(page_zip, page, index)); #endif /* UNIV_ZIP_DEBUG */ - offsets = rec_get_offsets(rec, index, NULL, page_is_leaf(page), + offsets = rec_get_offsets(rec, index, NULL, page_is_leaf(page) + ? index->n_core_fields : 0, ULINT_UNDEFINED, &heap); if (rec_offs_any_extern(offsets)) { @@ -6061,7 +6094,7 @@ discard_page: pointer as the predefined minimum record */ min_mark_next_rec = true; - } else if (dict_index_is_spatial(index)) { + } else if (index->is_spatial()) { /* For rtree, if delete the leftmost node pointer, we need to update parent page. */ rtr_mbr_t father_mbr; @@ -6076,7 +6109,7 @@ discard_page: &father_cursor); offsets = rec_get_offsets( btr_cur_get_rec(&father_cursor), index, NULL, - false, ULINT_UNDEFINED, &heap); + 0, ULINT_UNDEFINED, &heap); father_rec = btr_cur_get_rec(&father_cursor); rtr_read_mbr(rec_get_nth_field( @@ -6998,12 +7031,13 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index) page = btr_cur_get_page(&cursor); rec = page_rec_get_next(page_get_infimum_rec(page)); - const bool is_leaf = page_is_leaf(page); + const ulint n_core = page_is_leaf(page) + ? index->n_core_fields : 0; if (!page_rec_is_supremum(rec)) { not_empty_flag = 1; offsets_rec = rec_get_offsets(rec, index, offsets_rec, - is_leaf, + n_core, ULINT_UNDEFINED, &heap); if (n_not_null != NULL) { @@ -7024,7 +7058,7 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index) offsets_next_rec = rec_get_offsets(next_rec, index, offsets_next_rec, - is_leaf, + n_core, ULINT_UNDEFINED, &heap); diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc index a68d6fa771d..b22d9f8323d 100644 --- a/storage/innobase/btr/btr0defragment.cc +++ b/storage/innobase/btr/btr0defragment.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (C) 2012, 2014 Facebook, Inc. All Rights Reserved. -Copyright (C) 2014, 2019, MariaDB Corporation. +Copyright (C) 2014, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -340,12 +340,12 @@ btr_defragment_calc_n_recs_for_size( ulint size = 0; page_cur_t cur; + const ulint n_core = page_is_leaf(page) ? 
index->n_core_fields : 0; page_cur_set_before_first(block, &cur); page_cur_move_to_next(&cur); while (page_cur_get_rec(&cur) != page_get_supremum_rec(page)) { rec_t* cur_rec = page_cur_get_rec(&cur); - offsets = rec_get_offsets(cur_rec, index, offsets, - page_is_leaf(page), + offsets = rec_get_offsets(cur_rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); ulint rec_size = rec_offs_size(offsets); size += rec_size; @@ -357,6 +357,9 @@ btr_defragment_calc_n_recs_for_size( page_cur_move_to_next(&cur); } *n_recs_size = size; + if (UNIV_LIKELY_NULL(heap)) { + mem_heap_free(heap); + } return n_recs; } diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc index 9c5216dc015..2c3f06da111 100644 --- a/storage/innobase/btr/btr0pcur.cc +++ b/storage/innobase/btr/btr0pcur.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2016, 2020, MariaDB Corporation. +Copyright (c) 2016, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -61,6 +61,7 @@ btr_pcur_reset( cursor->btr_cur.index = NULL; cursor->btr_cur.page_cur.rec = NULL; cursor->old_rec = NULL; + cursor->old_n_core_fields = 0; cursor->old_n_fields = 0; cursor->old_stored = false; @@ -151,7 +152,8 @@ before_first: ut_ad(!page_rec_is_infimum(rec)); if (UNIV_UNLIKELY(rec_is_metadata(rec, *index))) { - ut_ad(index->table->instant); + ut_ad(index->table->instant + || block->page.id.page_no() != index->page); ut_ad(page_get_n_recs(block->frame) == 1); ut_ad(page_is_leaf(block->frame)); ut_ad(!page_has_prev(block->frame)); @@ -165,11 +167,8 @@ before_first: if (rec_is_metadata(rec, *index)) { ut_ad(!page_has_prev(block->frame)); - ut_d(const rec_t* p = rec); rec = page_rec_get_next(rec); if (page_rec_is_supremum(rec)) { - ut_ad(page_has_next(block->frame) - || rec_is_alter_metadata(p, *index)); goto before_first; } } @@ -181,19 +180,21 @@ before_first: if (index->is_ibuf()) { ut_ad(!index->table->not_redundant()); - cursor->old_n_fields = rec_get_n_fields_old(rec); - } else if (page_rec_is_leaf(rec)) { - cursor->old_n_fields = dict_index_get_n_unique_in_tree(index); - } else if (index->is_spatial()) { - ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index) - == DICT_INDEX_SPATIAL_NODEPTR_SIZE); - /* For R-tree, we have to compare - the child page numbers as well. */ - cursor->old_n_fields = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1; + cursor->old_n_fields = uint16_t(rec_get_n_fields_old(rec)); } else { - cursor->old_n_fields = dict_index_get_n_unique_in_tree(index); + cursor->old_n_fields = static_cast<uint16>( + dict_index_get_n_unique_in_tree(index)); + if (index->is_spatial() && !page_rec_is_leaf(rec)) { + ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index) + == DICT_INDEX_SPATIAL_NODEPTR_SIZE); + /* For R-tree, we have to compare + the child page numbers as well. 
*/ + cursor->old_n_fields + = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1; + } } + cursor->old_n_core_fields = index->n_core_fields; cursor->old_rec = rec_copy_prefix_to_buf(rec, index, cursor->old_n_fields, &cursor->old_rec_buf, @@ -228,6 +229,7 @@ btr_pcur_copy_stored_position( + (pcur_donate->old_rec - pcur_donate->old_rec_buf); } + pcur_receive->old_n_core_fields = pcur_donate->old_n_core_fields; pcur_receive->old_n_fields = pcur_donate->old_n_fields; } @@ -319,6 +321,8 @@ btr_pcur_restore_position_func( } ut_a(cursor->old_rec); + ut_a(cursor->old_n_core_fields); + ut_a(cursor->old_n_core_fields <= index->n_core_fields); ut_a(cursor->old_n_fields); switch (latch_mode) { @@ -352,11 +356,16 @@ btr_pcur_restore_position_func( rec_offs_init(offsets2_); heap = mem_heap_create(256); + ut_ad(cursor->old_n_core_fields + == index->n_core_fields); + offsets1 = rec_get_offsets( - cursor->old_rec, index, offsets1, true, + cursor->old_rec, index, offsets1, + cursor->old_n_core_fields, cursor->old_n_fields, &heap); offsets2 = rec_get_offsets( - rec, index, offsets2, true, + rec, index, offsets2, + index->n_core_fields, cursor->old_n_fields, &heap); ut_ad(!cmp_rec_rec(cursor->old_rec, @@ -381,8 +390,14 @@ btr_pcur_restore_position_func( heap = mem_heap_create(256); - tuple = dict_index_build_data_tuple(cursor->old_rec, index, true, - cursor->old_n_fields, heap); + tuple = dtuple_create(heap, cursor->old_n_fields); + + dict_index_copy_types(tuple, index, cursor->old_n_fields); + + rec_copy_prefix_to_dtuple(tuple, cursor->old_rec, index, + cursor->old_n_core_fields, + cursor->old_n_fields, heap); + ut_ad(dtuple_check_typed(tuple)); /* Save the old search mode of the cursor */ old_mode = cursor->search_mode; @@ -421,7 +436,8 @@ btr_pcur_restore_position_func( && btr_pcur_is_on_user_rec(cursor) && !cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor), rec_get_offsets(btr_pcur_get_rec(cursor), - index, offsets, true, + index, offsets, + index->n_core_fields, ULINT_UNDEFINED, &heap))) { /* We have to store the NEW value for the modify clock, diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc index 6a1163e5cf9..2eae4cf503f 100644 --- a/storage/innobase/btr/btr0sea.cc +++ b/storage/innobase/btr/btr0sea.cc @@ -2,7 +2,7 @@ Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -666,6 +666,12 @@ btr_search_update_hash_ref( return; } + if (cursor->index != index) { + ut_ad(cursor->index->id == index->id); + btr_search_drop_page_hash_index(block); + return; + } + ut_ad(block->page.id.space() == index->table->space_id); ut_ad(index == cursor->index); ut_ad(!dict_index_is_ibuf(index)); @@ -690,7 +696,8 @@ btr_search_update_hash_ref( ulint fold = rec_fold( rec, - rec_get_offsets(rec, index, offsets_, true, + rec_get_offsets(rec, index, offsets_, + index->n_core_fields, ULINT_UNDEFINED, &heap), block->curr_n_fields, block->curr_n_bytes, index->id); @@ -749,7 +756,8 @@ btr_search_check_guess( match = 0; - offsets = rec_get_offsets(rec, cursor->index, offsets, true, + offsets = rec_get_offsets(rec, cursor->index, offsets, + cursor->index->n_core_fields, n_unique, &heap); cmp = cmp_dtuple_rec_with_match(tuple, rec, offsets, &match); @@ -800,7 +808,8 @@ btr_search_check_guess( } offsets = rec_get_offsets(prev_rec, cursor->index, offsets, - true, n_unique, &heap); + cursor->index->n_core_fields, + n_unique, &heap); cmp = cmp_dtuple_rec_with_match( tuple, prev_rec, offsets, &match); if (mode == PAGE_CUR_GE) { @@ -823,7 +832,8 @@ btr_search_check_guess( } offsets = rec_get_offsets(next_rec, cursor->index, offsets, - true, n_unique, &heap); + cursor->index->n_core_fields, + n_unique, &heap); cmp = cmp_dtuple_rec_with_match( tuple, next_rec, offsets, &match); if (mode == PAGE_CUR_LE) { @@ -1125,15 +1135,26 @@ retry: % btr_ahi_parts; latch = btr_search_latches[ahi_slot]; - rw_lock_s_lock(latch); + dict_index_t* index = block->index; + + bool is_freed = index && index->freed(); + if (is_freed) { + rw_lock_x_lock(latch); + } else { + rw_lock_s_lock(latch); + } + assert_block_ahi_valid(block); - if (!block->index || !btr_search_enabled) { - rw_lock_s_unlock(latch); + if (!index || !btr_search_enabled) { + if (is_freed) { + rw_lock_x_unlock(latch); + } else { + rw_lock_s_unlock(latch); + } return; } - dict_index_t* index = block->index; #ifdef MYSQL_INDEX_DISABLE_AHI ut_ad(!index->disable_ahi); #endif @@ -1149,7 +1170,9 @@ retry: /* NOTE: The AHI fields of block must not be accessed after releasing search latch, as the index page might only be s-latched! 
*/ - rw_lock_s_unlock(latch); + if (!is_freed) { + rw_lock_s_unlock(latch); + } ut_a(n_fields > 0 || n_bytes > 0); @@ -1176,7 +1199,7 @@ retry: while (!page_rec_is_supremum(rec)) { offsets = rec_get_offsets( - rec, index, offsets, true, + rec, index, offsets, index->n_core_fields, btr_search_get_n_fields(n_fields, n_bytes), &heap); fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id); @@ -1200,16 +1223,18 @@ next_rec: mem_heap_free(heap); } - rw_lock_x_lock(latch); + if (!is_freed) { + rw_lock_x_lock(latch); - if (UNIV_UNLIKELY(!block->index)) { - /* Someone else has meanwhile dropped the hash index */ + if (UNIV_UNLIKELY(!block->index)) { + /* Someone else has meanwhile dropped the + hash index */ + goto cleanup; + } - goto cleanup; + ut_a(block->index == index); } - ut_a(block->index == index); - if (block->curr_n_fields != n_fields || block->curr_n_bytes != n_bytes) { @@ -1400,7 +1425,7 @@ btr_search_build_page_hash_index( ut_a(index->id == btr_page_get_index_id(page)); offsets = rec_get_offsets( - rec, index, offsets, true, + rec, index, offsets, index->n_core_fields, btr_search_get_n_fields(n_fields, n_bytes), &heap); ut_ad(page_rec_is_supremum(rec) @@ -1431,7 +1456,7 @@ btr_search_build_page_hash_index( } offsets = rec_get_offsets( - next_rec, index, offsets, true, + next_rec, index, offsets, index->n_core_fields, btr_search_get_n_fields(n_fields, n_bytes), &heap); next_fold = rec_fold(next_rec, offsets, n_fields, n_bytes, index->id); @@ -1583,6 +1608,7 @@ btr_search_move_or_delete_hash_entries( rw_lock_t* ahi_latch = index ? btr_get_search_latch(index) : NULL; if (new_block->index) { +drop_exit: btr_search_drop_page_hash_index(block); return; } @@ -1594,6 +1620,12 @@ btr_search_move_or_delete_hash_entries( rw_lock_s_lock(ahi_latch); if (block->index) { + + if (block->index != index) { + rw_lock_s_unlock(ahi_latch); + goto drop_exit; + } + ulint n_fields = block->curr_n_fields; ulint n_bytes = block->curr_n_bytes; ibool left_side = block->curr_left_side; @@ -1614,7 +1646,6 @@ btr_search_move_or_delete_hash_entries( ut_ad(left_side == block->curr_left_side); return; } - rw_lock_s_unlock(ahi_latch); } @@ -1652,6 +1683,12 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor) return; } + if (index != cursor->index) { + ut_ad(index->id == cursor->index->id); + btr_search_drop_page_hash_index(block); + return; + } + ut_ad(block->page.id.space() == index->table->space_id); ut_a(index == cursor->index); ut_a(block->curr_n_fields > 0 || block->curr_n_bytes > 0); @@ -1659,7 +1696,8 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor) rec = btr_cur_get_rec(cursor); - fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_, true, + fold = rec_fold(rec, rec_get_offsets(rec, index, offsets_, + index->n_core_fields, ULINT_UNDEFINED, &heap), block->curr_n_fields, block->curr_n_bytes, index->id); if (UNIV_LIKELY_NULL(heap)) { @@ -1725,6 +1763,12 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) return; } + if (cursor->index != index) { + ut_ad(cursor->index->id == index->id); + btr_search_drop_page_hash_index(block); + return; + } + ut_a(cursor->index == index); ut_ad(!dict_index_is_ibuf(index)); rw_lock_x_lock(ahi_latch); @@ -1814,6 +1858,12 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) #ifdef MYSQL_INDEX_DISABLE_AHI ut_a(!index->disable_ahi); #endif + if (index != cursor->index) { + ut_ad(index->id == cursor->index->id); + btr_search_drop_page_hash_index(block); + return; + } + ut_a(index == cursor->index); 
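/* NOTE: the guard added just above is the same one introduced earlier in btr_search_update_hash_ref() and again in btr_search_update_hash_on_delete() and btr_search_update_hash_node_on_insert(): whenever block->index no longer equals cursor->index, the debug assertion shows that both pointers still carry the same index id, so the block evidently caches a different incarnation of the same index (for example one that is detached and waiting to be freed). Updating the adaptive hash index through such a stale pointer would be unsafe, so the page hash index is dropped instead of being updated. */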
ut_ad(!dict_index_is_ibuf(index)); @@ -1824,13 +1874,14 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) ins_rec = page_rec_get_next_const(rec); next_rec = page_rec_get_next_const(ins_rec); - offsets = rec_get_offsets(ins_rec, index, offsets, true, + offsets = rec_get_offsets(ins_rec, index, offsets, + index->n_core_fields, ULINT_UNDEFINED, &heap); ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index->id); if (!page_rec_is_supremum(next_rec)) { offsets = rec_get_offsets( - next_rec, index, offsets, true, + next_rec, index, offsets, index->n_core_fields, btr_search_get_n_fields(n_fields, n_bytes), &heap); next_fold = rec_fold(next_rec, offsets, n_fields, n_bytes, index->id); @@ -1842,7 +1893,7 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch) if (!page_rec_is_infimum(rec) && !rec_is_metadata(rec, *index)) { offsets = rec_get_offsets( - rec, index, offsets, true, + rec, index, offsets, index->n_core_fields, btr_search_get_n_fields(n_fields, n_bytes), &heap); fold = rec_fold(rec, offsets, n_fields, n_bytes, index->id); } else { @@ -2048,7 +2099,8 @@ btr_search_hash_table_validate(ulint hash_table_id) page_index_id = btr_page_get_index_id(block->frame); offsets = rec_get_offsets( - node->data, block->index, offsets, true, + node->data, block->index, offsets, + block->index->n_core_fields, btr_search_get_n_fields(block->curr_n_fields, block->curr_n_bytes), &heap); diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 1462c847e09..85bf8f2a059 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -1911,6 +1911,10 @@ buf_pool_init_instance( ut_free(buf_pool->chunks); buf_pool_mutex_exit(buf_pool); + /* Free the mutexes that were already + created before freeing the instance */ + mutex_free(&buf_pool->mutex); + mutex_free(&buf_pool->zip_mutex); return(DB_ERROR); } @@ -5713,6 +5717,9 @@ loop: memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8); memset(frame + FIL_PAGE_LSN, 0, 8); + /* Mark the page as just allocated, for the check in + buf_flush_init_for_writing() */ + ut_d(memset(frame + FIL_PAGE_SPACE_OR_CHKSUM, 0, 4)); #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG ut_a(++buf_dbg_counter % 5771 || buf_validate()); diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 45c9b661d6f..2cfca67ddd1 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -783,7 +783,17 @@ buf_flush_init_for_writing( || &block->page.zip == page_zip_); ut_ad(!block || newest_lsn); ut_ad(page); - ut_ad(!newest_lsn || fil_page_get_type(page)); + /* The encryption key rotation procedure can write dummy log records + to update a page's space id, which causes a page LSN update. We then + need an additional check during recovery to be sure the page is + freshly allocated; see buf_page_create() for such patterns. */ + ut_ad(fil_page_get_type(page) + || (!newest_lsn + || (mach_read_from_4(page + FIL_PAGE_SPACE_ID) + == block->page.id.space() + && mach_read_from_4(page + FIL_PAGE_PREV) == 0xffffffff + && mach_read_from_4(page + FIL_PAGE_NEXT) == 0xffffffff + && !mach_read_from_4(page + FIL_PAGE_SPACE_OR_CHKSUM)))); if (page_zip_) { page_zip_des_t* page_zip; diff --git a/storage/innobase/data/data0data.cc b/storage/innobase/data/data0data.cc index 03c471c35fb..fe849d8ae29 100644 --- a/storage/innobase/data/data0data.cc +++ b/storage/innobase/data/data0data.cc @@ -686,7 +686,7 @@ dtuple_convert_big_rec( goto skip_field; } - longest_i = i; +
longest_i = i + mblob; longest = savings; skip_field: @@ -767,7 +767,7 @@ void dtuple_convert_back_big_rec( /*========================*/ dict_index_t* index MY_ATTRIBUTE((unused)), /*!< in: index */ - dtuple_t* entry, /*!< in: entry whose data was put to vector */ + dtuple_t* entry, /*!< in/out: entry whose data was put to vector */ big_rec_t* vector) /*!< in, own: big rec vector; it is freed in this function */ { diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index ea551adea41..8a8095f2226 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -2,7 +2,7 @@ Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -270,7 +270,7 @@ dict_table_try_drop_aborted( && !UT_LIST_GET_FIRST(table->locks)) { /* Silence a debug assertion in row_merge_drop_indexes(). */ ut_d(table->acquire()); - row_merge_drop_indexes(trx, table, TRUE); + row_merge_drop_indexes(trx, table, true); ut_d(table->release()); ut_ad(table->get_ref_count() == ref_count); trx_commit_for_mysql(trx); @@ -4849,7 +4849,9 @@ dict_index_build_node_ptr( dtype_set(dfield_get_type(field), DATA_SYS_CHILD, DATA_NOT_NULL, 4); - rec_copy_prefix_to_dtuple(tuple, rec, index, !level, n_unique, heap); + rec_copy_prefix_to_dtuple(tuple, rec, index, + level ? 0 : index->n_core_fields, + n_unique, heap); dtuple_set_info_bits(tuple, dtuple_get_info_bits(tuple) | REC_STATUS_NODE_PTR); @@ -4873,11 +4875,14 @@ dict_index_build_data_tuple( ulint n_fields, mem_heap_t* heap) { + ut_ad(!index->is_clust()); + dtuple_t* tuple = dtuple_create(heap, n_fields); dict_index_copy_types(tuple, index, n_fields); - rec_copy_prefix_to_dtuple(tuple, rec, index, leaf, n_fields, heap); + rec_copy_prefix_to_dtuple(tuple, rec, index, + leaf ? n_fields : 0, n_fields, heap); ut_ad(dtuple_check_typed(tuple)); diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc index ddd2b99ef21..2741e29740a 100644 --- a/storage/innobase/dict/dict0mem.cc +++ b/storage/innobase/dict/dict0mem.cc @@ -2,7 +2,7 @@ Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -939,7 +939,7 @@ dict_mem_fill_vcol_from_v_indexes( Later virtual column set will be refreshed during loading of table. 
*/ if (!dict_index_has_virtual(index) - || index->has_new_v_col) { + || index->has_new_v_col()) { continue; } @@ -1375,7 +1375,8 @@ dict_index_t::vers_history_row( rec_t* clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, this, &clust_index, &mtr); if (clust_rec) { - offsets = rec_get_offsets(clust_rec, clust_index, offsets, true, + offsets = rec_get_offsets(clust_rec, clust_index, offsets, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); history_row = clust_index->vers_history_row(clust_rec, offsets); diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index dbb70ec1c6d..c5ff0c56951 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2009, 2019, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -178,8 +178,8 @@ dict_stats_persistent_storage_check( {"table_name", DATA_VARMYSQL, DATA_NOT_NULL, 597}, - {"last_update", DATA_FIXBINARY, - DATA_NOT_NULL, 4}, + {"last_update", DATA_INT, + DATA_NOT_NULL | DATA_UNSIGNED, 4}, {"n_rows", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 8}, @@ -209,8 +209,8 @@ dict_stats_persistent_storage_check( {"index_name", DATA_VARMYSQL, DATA_NOT_NULL, 192}, - {"last_update", DATA_FIXBINARY, - DATA_NOT_NULL, 4}, + {"last_update", DATA_INT, + DATA_NOT_NULL | DATA_UNSIGNED, 4}, {"stat_name", DATA_VARMYSQL, DATA_NOT_NULL, 64*3}, @@ -1157,7 +1157,7 @@ dict_stats_analyze_index_level( prev_rec_offsets = rec_get_offsets( prev_rec, index, prev_rec_offsets, - true, + index->n_core_fields, n_uniq, &heap); prev_rec = rec_copy_prefix_to_buf( @@ -1169,8 +1169,9 @@ dict_stats_analyze_index_level( continue; } - rec_offsets = rec_get_offsets( - rec, index, rec_offsets, !level, n_uniq, &heap); + rec_offsets = rec_get_offsets(rec, index, rec_offsets, + level ? 0 : index->n_core_fields, + n_uniq, &heap); (*total_recs)++; @@ -1178,7 +1179,8 @@ dict_stats_analyze_index_level( ulint matched_fields; prev_rec_offsets = rec_get_offsets( - prev_rec, index, prev_rec_offsets, !level, + prev_rec, index, prev_rec_offsets, + level ? 0 : index->n_core_fields, n_uniq, &heap); cmp_rec_rec(prev_rec, rec, @@ -1332,7 +1334,7 @@ be big enough) @param[in] index index of the page @param[in] page the page to scan @param[in] n_prefix look at the first n_prefix columns -@param[in] is_leaf whether this is the leaf page +@param[in] n_core 0, or index->n_core_fields for leaf @param[out] n_diff number of distinct records encountered @param[out] n_external_pages if this is non-NULL then it will be set to the number of externally stored pages which were encountered @@ -1347,7 +1349,7 @@ dict_stats_scan_page( const dict_index_t* index, const page_t* page, ulint n_prefix, - bool is_leaf, + ulint n_core, ib_uint64_t* n_diff, ib_uint64_t* n_external_pages) { @@ -1359,9 +1361,9 @@ dict_stats_scan_page( Because offsets1,offsets2 should be big enough, this memory heap should never be used. */ mem_heap_t* heap = NULL; - ut_ad(is_leaf == page_is_leaf(page)); + ut_ad(!!n_core == page_is_leaf(page)); const rec_t* (*get_next)(const rec_t*) - = !is_leaf || srv_stats_include_delete_marked + = !n_core || srv_stats_include_delete_marked ? 
page_rec_get_next_const : page_rec_get_next_non_del_marked; @@ -1380,7 +1382,7 @@ dict_stats_scan_page( return(NULL); } - offsets_rec = rec_get_offsets(rec, index, offsets_rec, is_leaf, + offsets_rec = rec_get_offsets(rec, index, offsets_rec, n_core, ULINT_UNDEFINED, &heap); if (should_count_external_pages) { @@ -1397,7 +1399,7 @@ dict_stats_scan_page( ulint matched_fields; offsets_next_rec = rec_get_offsets(next_rec, index, - offsets_next_rec, is_leaf, + offsets_next_rec, n_core, ULINT_UNDEFINED, &heap); @@ -1411,7 +1413,7 @@ dict_stats_scan_page( (*n_diff)++; - if (!is_leaf) { + if (!n_core) { break; } } @@ -1497,7 +1499,7 @@ dict_stats_analyze_index_below_cur( rec = btr_cur_get_rec(cur); ut_ad(!page_rec_is_leaf(rec)); - offsets_rec = rec_get_offsets(rec, index, offsets1, false, + offsets_rec = rec_get_offsets(rec, index, offsets1, 0, ULINT_UNDEFINED, &heap); page_id_t page_id(index->table->space_id, @@ -1531,7 +1533,7 @@ dict_stats_analyze_index_below_cur( /* search for the first non-boring record on the page */ offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - false, n_diff, NULL); + 0, n_diff, NULL); /* pages on level > 0 are not allowed to be empty */ ut_a(offsets_rec != NULL); @@ -1576,7 +1578,7 @@ dict_stats_analyze_index_below_cur( offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - true, n_diff, + index->n_core_fields, n_diff, n_external_pages); #if 0 diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc index 2d358f2c9e3..c37d89181d9 100644 --- a/storage/innobase/dict/dict0stats_bg.cc +++ b/storage/innobase/dict/dict0stats_bg.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -156,9 +156,24 @@ schedule new estimates for table and index statistics to be calculated. void dict_stats_update_if_needed_func(dict_table_t *table) #endif { - ut_ad(table->stat_initialized); ut_ad(!mutex_own(&dict_sys.mutex)); + if (UNIV_UNLIKELY(!table->stat_initialized)) { + /* The table may have been evicted from dict_sys + and reloaded internally by InnoDB for FOREIGN KEY + processing, but not reloaded by the SQL layer. + + We can (re)compute the transient statistics when the + table is actually loaded by the SQL layer. + + Note: If InnoDB persistent statistics are enabled, + we will skip the updates. We must do this, because + dict_table_get_n_rows() below assumes that the + statistics have been initialized. The DBA may have + to execute ANALYZE TABLE. */ + return; + } + ulonglong counter = table->stat_modified_counter++; ulonglong n_rows = dict_table_get_n_rows(table); diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 5442ee285db..c5323ed0ffc 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -488,12 +488,16 @@ static bool fil_node_open_file(fil_node_t* node) const bool first_time_open = node->size == 0; - bool o_direct_possible = !FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags); - if (const ulint ssize = FSP_FLAGS_GET_ZIP_SSIZE(space->flags)) { - compile_time_assert(((UNIV_ZIP_SIZE_MIN >> 1) << 3) == 4096); - if (ssize < 3) { - o_direct_possible = false; - } + ulint type; + static_assert(((UNIV_ZIP_SIZE_MIN >> 1) << 3) == 4096, + "compatibility"); + switch (FSP_FLAGS_GET_ZIP_SSIZE(space->flags)) { + case 1: + case 2: + type = OS_DATA_FILE_NO_O_DIRECT; + break; + default: + type = OS_DATA_FILE; } if (first_time_open @@ -514,9 +518,7 @@ retry: ? OS_FILE_OPEN_RAW | OS_FILE_ON_ERROR_NO_EXIT : OS_FILE_OPEN | OS_FILE_ON_ERROR_NO_EXIT, OS_FILE_AIO, - o_direct_possible - ? OS_DATA_FILE - : OS_DATA_FILE_NO_O_DIRECT, + type, read_only_mode, &success); @@ -556,9 +558,7 @@ fail: ? OS_FILE_OPEN_RAW | OS_FILE_ON_ERROR_NO_EXIT : OS_FILE_OPEN | OS_FILE_ON_ERROR_NO_EXIT, OS_FILE_AIO, - o_direct_possible - ? OS_DATA_FILE - : OS_DATA_FILE_NO_O_DIRECT, + type, read_only_mode, &success); } @@ -887,15 +887,14 @@ fil_space_extend_must_retry( } } -/*******************************************************************//** -Reserves the fil_system.mutex and tries to make sure we can open at least one +/** Reserves the fil_system.mutex and tries to make sure we can open at least one file while holding it. This should be called before calling -fil_node_prepare_for_io(), because that function may need to open a file. */ +fil_node_prepare_for_io(), because that function may need to open a file. +@param[in] space_id tablespace id +@return whether the tablespace is usable for io */ static -void -fil_mutex_enter_and_prepare_for_io( -/*===============================*/ - ulint space_id) /*!< in: space id */ +bool +fil_mutex_enter_and_prepare_for_io(ulint space_id) { for (ulint count = 0;;) { mutex_enter(&fil_system.mutex); @@ -908,7 +907,7 @@ fil_mutex_enter_and_prepare_for_io( fil_space_t* space = fil_space_get_by_id(space_id); if (space == NULL) { - break; + return false; } fil_node_t* node = UT_LIST_GET_LAST(space->chain); @@ -923,6 +922,10 @@ fil_mutex_enter_and_prepare_for_io( the insert buffer. The insert buffer is in tablespace 0, and we cannot end up waiting in this function. */ + } else if (space->is_stopping() && !space->is_being_truncated) { + /* If the tablespace is being deleted then InnoDB + shouldn't prepare the tablespace for i/o */ + return false; } else if (!node || node->is_open()) { /* If the file is already open, no need to do anything; if the space does not exist, we handle the @@ -994,6 +997,8 @@ fil_mutex_enter_and_prepare_for_io( break; } + + return true; } /** Try to extend a tablespace if it is smaller than the specified size. @@ -1010,7 +1015,10 @@ fil_space_extend( bool success; do { - fil_mutex_enter_and_prepare_for_io(space->id); + if (!fil_mutex_enter_and_prepare_for_io(space->id)) { + success = false; + break; + } } while (fil_space_extend_must_retry( space, UT_LIST_GET_LAST(space->chain), size, &success)); @@ -1365,7 +1373,9 @@ fil_space_t* fil_system_t::read_page0(ulint id) /* It is possible that the tablespace is dropped while we are not holding the mutex. 
*/ - fil_mutex_enter_and_prepare_for_io(id); + if (!fil_mutex_enter_and_prepare_for_io(id)) { + return NULL; + } fil_space_t* space = fil_space_get_by_id(id); @@ -2802,7 +2812,6 @@ fil_rename_tablespace( ut_ad(strchr(new_file_name, OS_PATH_SEPARATOR) != NULL); if (!recv_recovery_is_on()) { - fil_name_write_rename(id, old_file_name, new_file_name); log_mutex_enter(); } @@ -2895,13 +2904,22 @@ fil_ibd_create( return NULL; } + ulint type; + static_assert(((UNIV_ZIP_SIZE_MIN >> 1) << 3) == 4096, + "compatibility"); + switch (FSP_FLAGS_GET_ZIP_SSIZE(flags)) { + case 1: + case 2: + type = OS_DATA_FILE_NO_O_DIRECT; + break; + default: + type = OS_DATA_FILE; + } + file = os_file_create( innodb_data_file_key, path, OS_FILE_CREATE | OS_FILE_ON_ERROR_NO_EXIT, - OS_FILE_NORMAL, - OS_DATA_FILE, - srv_read_only_mode, - &success); + OS_FILE_NORMAL, type, srv_read_only_mode, &success); if (!success) { /* The following call will print an error message */ diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 4bdc556cee8..dbfd37544e6 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2016, 2020, MariaDB Corporation. +Copyright (c) 2016, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1708,7 +1708,7 @@ fts_drop_tables( error = fts_drop_common_tables(trx, &fts_table); - if (error == DB_SUCCESS) { + if (error == DB_SUCCESS && table->fts) { error = fts_drop_all_index_tables(trx, table->fts); } @@ -1730,7 +1730,7 @@ fts_create_in_mem_aux_table( dict_table_t* new_table = dict_mem_table_create( aux_table_name, NULL, n_cols, 0, table->flags, table->space_id == TRX_SYS_SPACE - ? 0 : table->space->purpose == FIL_TYPE_TEMPORARY + ? 0 : table->space_id == SRV_TMP_SPACE_ID ? 
DICT_TF2_TEMPORARY : DICT_TF2_USE_FILE_PER_TABLE); if (DICT_TF_HAS_DATA_DIR(table->flags)) { @@ -2518,7 +2518,8 @@ fts_get_max_cache_size( } } else { ib::error() << "(" << error << ") reading max" - " cache config value from config table"; + " cache config value from config table " + << fts_table->table->name; } ut_free(value.f_str); @@ -2691,7 +2692,8 @@ func_exit: } else { *doc_id = 0; - ib::error() << "(" << error << ") while getting next doc id."; + ib::error() << "(" << error << ") while getting next doc id " + "for table " << table->name; fts_sql_rollback(trx); if (error == DB_DEADLOCK) { @@ -2771,7 +2773,8 @@ fts_update_sync_doc_id( cache->synced_doc_id = doc_id; } else { ib::error() << "(" << error << ") while" - " updating last doc id."; + " updating last doc id for table" + << table->name; fts_sql_rollback(trx); } @@ -3482,7 +3485,8 @@ fts_add_doc_by_id( } - offsets = rec_get_offsets(clust_rec, clust_index, NULL, true, + offsets = rec_get_offsets(clust_rec, clust_index, NULL, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); for (ulint i = 0; i < num_idx; ++i) { @@ -3996,7 +4000,8 @@ fts_sync_write_words( if (UNIV_UNLIKELY(error != DB_SUCCESS) && !print_error) { ib::error() << "(" << error << ") writing" - " word node to FTS auxiliary index table."; + " word node to FTS auxiliary index table " + << table->name; print_error = TRUE; } } @@ -4151,7 +4156,8 @@ fts_sync_commit( fts_sql_commit(trx); } else { fts_sql_rollback(trx); - ib::error() << "(" << error << ") during SYNC."; + ib::error() << "(" << error << ") during SYNC of " + "table " << sync->table->name; } if (UNIV_UNLIKELY(fts_enable_diag_print) && elapsed_time) { @@ -4922,7 +4928,8 @@ fts_get_rows_count( trx->error_state = DB_SUCCESS; } else { ib::error() << "(" << error - << ") while reading FTS table."; + << ") while reading FTS table " + << table_name; break; /* Exit the loop. */ } diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc index 170fb2e8a57..122402eb34d 100644 --- a/storage/innobase/gis/gis0rtree.cc +++ b/storage/innobase/gis/gis0rtree.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2018, 2020, MariaDB Corporation. +Copyright (c) 2018, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -87,8 +87,9 @@ rtr_page_split_initialize_nodes( stop = task + n_recs; rec = page_rec_get_next(page_get_infimum_rec(page)); - const bool is_leaf = page_is_leaf(page); - *offsets = rec_get_offsets(rec, cursor->index, *offsets, is_leaf, + const ulint n_core = page_is_leaf(page) + ? cursor->index->n_core_fields : 0; + *offsets = rec_get_offsets(rec, cursor->index, *offsets, n_core, n_uniq, &heap); source_cur = rec_get_nth_field(rec, *offsets, 0, &len); @@ -101,7 +102,7 @@ rtr_page_split_initialize_nodes( rec = page_rec_get_next(rec); *offsets = rec_get_offsets(rec, cursor->index, *offsets, - is_leaf, n_uniq, &heap); + n_core, n_uniq, &heap); source_cur = rec_get_nth_field(rec, *offsets, 0, &len); } @@ -308,7 +309,8 @@ rtr_update_mbr_field( page_zip = buf_block_get_page_zip(block); child = btr_node_ptr_get_child_page_no(rec, offsets); - const bool is_leaf = page_is_leaf(block->frame); + const ulint n_core = page_is_leaf(block->frame) + ? 
index->n_core_fields : 0; if (new_rec) { child_rec = new_rec; @@ -324,7 +326,7 @@ rtr_update_mbr_field( if (cursor2) { rec_t* del_rec = btr_cur_get_rec(cursor2); offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2), - index, NULL, false, + index, NULL, 0, ULINT_UNDEFINED, &heap); del_page_no = btr_node_ptr_get_child_page_no(del_rec, offsets2); cur2_pos = page_rec_get_n_recs_before(btr_cur_get_rec(cursor2)); @@ -389,7 +391,7 @@ rtr_update_mbr_field( = page_rec_get_nth(page, cur2_pos); } offsets2 = rec_get_offsets(btr_cur_get_rec(cursor2), - index, NULL, false, + index, NULL, 0, ULINT_UNDEFINED, &heap); ut_ad(del_page_no == btr_node_ptr_get_child_page_no( cursor2->page_cur.rec, @@ -427,7 +429,7 @@ rtr_update_mbr_field( ut_ad(old_rec != insert_rec); page_cur_position(old_rec, block, &page_cur); - offsets2 = rec_get_offsets(old_rec, index, NULL, is_leaf, + offsets2 = rec_get_offsets(old_rec, index, NULL, n_core, ULINT_UNDEFINED, &heap); page_cur_delete_rec(&page_cur, index, offsets2, mtr); @@ -457,7 +459,7 @@ update_mbr: cur2_rec = cursor2->page_cur.rec; offsets2 = rec_get_offsets(cur2_rec, index, NULL, - is_leaf, + n_core, ULINT_UNDEFINED, &heap); cur2_rec_info = rec_get_info_bits(cur2_rec, @@ -517,7 +519,7 @@ update_mbr: if (ins_suc) { btr_cur_position(index, insert_rec, block, cursor); offsets = rec_get_offsets(insert_rec, - index, offsets, is_leaf, + index, offsets, n_core, ULINT_UNDEFINED, &heap); } @@ -532,7 +534,7 @@ update_mbr: cur2_rec = btr_cur_get_rec(cursor2); offsets2 = rec_get_offsets(cur2_rec, index, NULL, - is_leaf, + n_core, ULINT_UNDEFINED, &heap); /* If the cursor2 position is on a wrong rec, we @@ -546,7 +548,7 @@ update_mbr: while (!page_rec_is_supremum(cur2_rec)) { offsets2 = rec_get_offsets(cur2_rec, index, NULL, - is_leaf, + n_core, ULINT_UNDEFINED, &heap); cur2_pno = btr_node_ptr_get_child_page_no( @@ -836,7 +838,8 @@ rtr_split_page_move_rec_list( rec_move = static_cast<rtr_rec_move_t*>(mem_heap_alloc( heap, sizeof (*rec_move) * max_to_move)); - const bool is_leaf = page_is_leaf(page); + const ulint n_core = page_is_leaf(page) + ? index->n_core_fields : 0; /* Insert the recs in group 2 to new page. */ for (cur_split_node = node_array; @@ -846,10 +849,10 @@ rtr_split_page_move_rec_list( block, cur_split_node->key); offsets = rec_get_offsets(cur_split_node->key, - index, offsets, is_leaf, + index, offsets, n_core, ULINT_UNDEFINED, &heap); - ut_ad(!is_leaf || cur_split_node->key != first_rec); + ut_ad(!n_core || cur_split_node->key != first_rec); rec = page_cur_insert_rec_low( page_cur_get_rec(&new_page_cursor), @@ -884,7 +887,7 @@ rtr_split_page_move_rec_list( same temp-table in parallel. max_trx_id is ignored for temp tables because it not required for MVCC. */ - if (is_leaf && !index->table->is_temporary()) { + if (n_core && !index->table->is_temporary()) { page_update_max_trx_id(new_block, NULL, page_get_max_trx_id(page), mtr); @@ -937,7 +940,7 @@ rtr_split_page_move_rec_list( block, &page_cursor); offsets = rec_get_offsets( page_cur_get_rec(&page_cursor), index, - offsets, is_leaf, ULINT_UNDEFINED, + offsets, n_core, ULINT_UNDEFINED, &heap); page_cur_delete_rec(&page_cursor, index, offsets, mtr); @@ -1136,6 +1139,9 @@ func_start: /* Update the lock table */ lock_rtr_move_rec_list(new_block, block, rec_move, moved); + const ulint n_core = page_level + ? 0 : cursor->index->n_core_fields; + /* Delete recs in first group from the new page. 
*/ for (cur_split_node = rtr_split_node_array; cur_split_node < end_split_node - 1; ++cur_split_node) { @@ -1154,7 +1160,7 @@ func_start: *offsets = rec_get_offsets( page_cur_get_rec(page_cursor), - cursor->index, *offsets, !page_level, + cursor->index, *offsets, n_core, ULINT_UNDEFINED, heap); page_cur_delete_rec(page_cursor, @@ -1171,7 +1177,7 @@ func_start: block, page_cursor); *offsets = rec_get_offsets( page_cur_get_rec(page_cursor), - cursor->index, *offsets, !page_level, + cursor->index, *offsets, n_core, ULINT_UNDEFINED, heap); page_cur_delete_rec(page_cursor, cursor->index, *offsets, mtr); @@ -1400,7 +1406,8 @@ rtr_page_copy_rec_list_end_no_locks( rec_offs offsets_2[REC_OFFS_NORMAL_SIZE]; rec_offs* offsets2 = offsets_2; ulint moved = 0; - bool is_leaf = page_is_leaf(new_page); + const ulint n_core = page_is_leaf(new_page) + ? index->n_core_fields : 0; rec_offs_init(offsets_1); rec_offs_init(offsets_2); @@ -1429,14 +1436,14 @@ rtr_page_copy_rec_list_end_no_locks( cur_rec = page_rec_get_next(cur_rec); } - offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf, + offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core, ULINT_UNDEFINED, &heap); while (!page_rec_is_supremum(cur_rec)) { ulint cur_matched_fields = 0; int cmp; offsets2 = rec_get_offsets(cur_rec, index, offsets2, - is_leaf, + n_core, ULINT_UNDEFINED, &heap); cmp = cmp_rec_rec(cur1_rec, cur_rec, offsets1, offsets2, index, false, @@ -1448,7 +1455,7 @@ rtr_page_copy_rec_list_end_no_locks( /* Skip small recs. */ page_cur_move_to_next(&page_cur); cur_rec = page_cur_get_rec(&page_cur); - } else if (is_leaf) { + } else if (n_core) { if (rec_get_deleted_flag(cur1_rec, dict_table_is_comp(index->table))) { goto next; @@ -1471,7 +1478,7 @@ rtr_page_copy_rec_list_end_no_locks( cur_rec = page_cur_get_rec(&page_cur); - offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf, + offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core, ULINT_UNDEFINED, &heap); ins_rec = page_cur_insert_rec_low(cur_rec, index, @@ -1527,7 +1534,8 @@ rtr_page_copy_rec_list_start_no_locks( rec_offs* offsets2 = offsets_2; page_cur_t page_cur; ulint moved = 0; - bool is_leaf = page_is_leaf(buf_block_get_frame(block)); + const ulint n_core = page_is_leaf(buf_block_get_frame(block)) + ? index->n_core_fields : 0; rec_offs_init(offsets_1); rec_offs_init(offsets_2); @@ -1547,14 +1555,14 @@ rtr_page_copy_rec_list_start_no_locks( cur_rec = page_rec_get_next(cur_rec); } - offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf, + offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core, ULINT_UNDEFINED, &heap); while (!page_rec_is_supremum(cur_rec)) { ulint cur_matched_fields = 0; offsets2 = rec_get_offsets(cur_rec, index, offsets2, - is_leaf, + n_core, ULINT_UNDEFINED, &heap); int cmp = cmp_rec_rec(cur1_rec, cur_rec, offsets1, offsets2, index, false, @@ -1567,7 +1575,7 @@ rtr_page_copy_rec_list_start_no_locks( /* Skip small recs. 
*/ page_cur_move_to_next(&page_cur); cur_rec = page_cur_get_rec(&page_cur); - } else if (is_leaf) { + } else if (n_core) { if (rec_get_deleted_flag( cur1_rec, dict_table_is_comp(index->table))) { @@ -1591,7 +1599,7 @@ rtr_page_copy_rec_list_start_no_locks( cur_rec = page_cur_get_rec(&page_cur); - offsets1 = rec_get_offsets(cur1_rec, index, offsets1, is_leaf, + offsets1 = rec_get_offsets(cur1_rec, index, offsets1, n_core, ULINT_UNDEFINED, &heap); ins_rec = page_cur_insert_rec_low(cur_rec, index, @@ -1745,7 +1753,7 @@ rtr_check_same_block( while (!page_rec_is_supremum(rec)) { offsets = rec_get_offsets( - rec, index, NULL, false, ULINT_UNDEFINED, &heap); + rec, index, NULL, 0, ULINT_UNDEFINED, &heap); if (btr_node_ptr_get_child_page_no(rec, offsets) == page_no) { btr_cur_position(index, rec, parentb, cursor); diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc index 849e080728f..18f75e3d139 100644 --- a/storage/innobase/gis/gis0sea.cc +++ b/storage/innobase/gis/gis0sea.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2016, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -530,8 +530,7 @@ rtr_compare_cursor_rec( rec = btr_cur_get_rec(cursor); - offsets = rec_get_offsets( - rec, index, NULL, false, ULINT_UNDEFINED, heap); + offsets = rec_get_offsets(rec, index, NULL, 0, ULINT_UNDEFINED, heap); return(btr_node_ptr_get_child_page_no(rec, offsets) == page_no); } @@ -836,7 +835,8 @@ rtr_page_get_father_node_ptr( user_rec = btr_cur_get_rec(cursor); ut_a(page_rec_is_user_rec(user_rec)); - offsets = rec_get_offsets(user_rec, index, offsets, !level, + offsets = rec_get_offsets(user_rec, index, offsets, + level ? 0 : index->n_fields, ULINT_UNDEFINED, &heap); rtr_get_mbr_from_rec(user_rec, offsets, &mbr); @@ -853,7 +853,7 @@ rtr_page_get_father_node_ptr( node_ptr = btr_cur_get_rec(cursor); ut_ad(!page_rec_is_comp(node_ptr) || rec_get_status(node_ptr) == REC_STATUS_NODE_PTR); - offsets = rec_get_offsets(node_ptr, index, offsets, false, + offsets = rec_get_offsets(node_ptr, index, offsets, 0, ULINT_UNDEFINED, &heap); ulint child_page = btr_node_ptr_get_child_page_no(node_ptr, offsets); @@ -871,13 +871,14 @@ rtr_page_get_father_node_ptr( print_rec = page_rec_get_next( page_get_infimum_rec(page_align(user_rec))); offsets = rec_get_offsets(print_rec, index, offsets, - page_rec_is_leaf(user_rec), + page_rec_is_leaf(user_rec) + ? index->n_fields : 0, ULINT_UNDEFINED, &heap); error << "; child "; rec_print(error.m_oss, print_rec, rec_get_info_bits(print_rec, rec_offs_comp(offsets)), offsets); - offsets = rec_get_offsets(node_ptr, index, offsets, false, + offsets = rec_get_offsets(node_ptr, index, offsets, 0, ULINT_UNDEFINED, &heap); error << "; parent "; rec_print(error.m_oss, print_rec, @@ -1309,10 +1310,12 @@ rtr_cur_restore_position( heap = mem_heap_create(256); offsets1 = rec_get_offsets( - r_cursor->old_rec, index, NULL, !level, + r_cursor->old_rec, index, NULL, + level ? 0 : r_cursor->old_n_fields, r_cursor->old_n_fields, &heap); offsets2 = rec_get_offsets( - rec, index, NULL, !level, + rec, index, NULL, + level ? 
0 : r_cursor->old_n_fields, r_cursor->old_n_fields, &heap); comp = rec_offs_comp(offsets1); @@ -1379,12 +1382,12 @@ search_again: rec = btr_pcur_get_rec(r_cursor); - offsets1 = rec_get_offsets( - r_cursor->old_rec, index, NULL, !level, - r_cursor->old_n_fields, &heap); - offsets2 = rec_get_offsets( - rec, index, NULL, !level, - r_cursor->old_n_fields, &heap); + offsets1 = rec_get_offsets(r_cursor->old_rec, index, NULL, + level ? 0 : r_cursor->old_n_fields, + r_cursor->old_n_fields, &heap); + offsets2 = rec_get_offsets(rec, index, NULL, + level ? 0 : r_cursor->old_n_fields, + r_cursor->old_n_fields, &heap); comp = rec_offs_comp(offsets1); @@ -1673,7 +1676,7 @@ rtr_cur_search_with_match( page = buf_block_get_frame(block); const ulint level = btr_page_get_level(page); - const bool is_leaf = !level; + const ulint n_core = level ? 0 : index->n_fields; if (mode == PAGE_CUR_RTREE_LOCATE) { ut_ad(level != 0); @@ -1695,7 +1698,7 @@ rtr_cur_search_with_match( ulint new_rec_size = rec_get_converted_size(index, tuple, 0); - offsets = rec_get_offsets(rec, index, offsets, is_leaf, + offsets = rec_get_offsets(rec, index, offsets, n_core, dtuple_get_n_fields_cmp(tuple), &heap); @@ -1716,10 +1719,10 @@ rtr_cur_search_with_match( } while (!page_rec_is_supremum(rec)) { - offsets = rec_get_offsets(rec, index, offsets, is_leaf, + offsets = rec_get_offsets(rec, index, offsets, n_core, dtuple_get_n_fields_cmp(tuple), &heap); - if (!is_leaf) { + if (!n_core) { switch (mode) { case PAGE_CUR_CONTAIN: case PAGE_CUR_INTERSECT: @@ -1800,7 +1803,7 @@ rtr_cur_search_with_match( to rtr_info->path for non-leaf nodes, or rtr_info->matches for leaf nodes */ if (rtr_info && mode != PAGE_CUR_RTREE_INSERT) { - if (!is_leaf) { + if (!n_core) { ulint page_no; node_seq_t new_seq; bool is_loc; @@ -1811,7 +1814,7 @@ rtr_cur_search_with_match( == PAGE_CUR_RTREE_GET_FATHER); offsets = rec_get_offsets( - rec, index, offsets, false, + rec, index, offsets, 0, ULINT_UNDEFINED, &heap); page_no = btr_node_ptr_get_child_page_no( @@ -1860,7 +1863,8 @@ rtr_cur_search_with_match( /* Collect matched records on page */ offsets = rec_get_offsets( - rec, index, offsets, true, + rec, index, offsets, + index->n_fields, ULINT_UNDEFINED, &heap); rtr_leaf_push_match_rec( rec, rtr_info, offsets, @@ -1883,7 +1887,7 @@ rtr_cur_search_with_match( /* All records on page are searched */ if (page_rec_is_supremum(rec)) { - if (!is_leaf) { + if (!n_core) { if (!found) { /* No match case, if it is for insertion, then we select the record that result in @@ -1893,7 +1897,7 @@ rtr_cur_search_with_match( ut_ad(least_inc < DBL_MAX); offsets = rec_get_offsets( best_rec, index, offsets, - false, ULINT_UNDEFINED, &heap); + 0, ULINT_UNDEFINED, &heap); child_no = btr_node_ptr_get_child_page_no( best_rec, offsets); @@ -1945,11 +1949,11 @@ rtr_cur_search_with_match( /* Verify the record to be positioned is the same as the last record in matched_rec vector */ offsets2 = rec_get_offsets(test_rec.r_rec, index, - offsets2, true, + offsets2, index->n_fields, ULINT_UNDEFINED, &heap); offsets = rec_get_offsets(last_match_rec, index, - offsets, true, + offsets, index->n_fields, ULINT_UNDEFINED, &heap); ut_ad(cmp_rec_rec(test_rec.r_rec, last_match_rec, @@ -1966,9 +1970,8 @@ rtr_cur_search_with_match( ulint child_no; ut_ad(!last_match_rec && rec); - offsets = rec_get_offsets( - rec, index, offsets, false, - ULINT_UNDEFINED, &heap); + offsets = rec_get_offsets(rec, index, offsets, 0, + ULINT_UNDEFINED, &heap); child_no = btr_node_ptr_get_child_page_no(rec, offsets); @@ -1976,7 +1979,7 
@@ rtr_cur_search_with_match( index, rtr_info->parent_path, level, child_no, block, rec, 0); - } else if (rtr_info && found && !is_leaf) { + } else if (rtr_info && found && !n_core) { rec = last_match_rec; } @@ -1986,11 +1989,11 @@ rtr_cur_search_with_match( #ifdef UNIV_DEBUG /* Verify that we are positioned at the same child page as pushed in the path stack */ - if (!is_leaf && (!page_rec_is_supremum(rec) || found) + if (!n_core && (!page_rec_is_supremum(rec) || found) && mode != PAGE_CUR_RTREE_INSERT) { ulint page_no; - offsets = rec_get_offsets(rec, index, offsets, false, + offsets = rec_get_offsets(rec, index, offsets, 0, ULINT_UNDEFINED, &heap); page_no = btr_node_ptr_get_child_page_no(rec, offsets); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 345f2a37d5d..68b7a018821 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4,7 +4,7 @@ Copyright (c) 2000, 2020, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -62,6 +62,7 @@ this program; if not, write to the Free Software Foundation, Inc., #include <my_service_manager.h> #include <key.h> +#include <sql_manager.h> /* Include necessary InnoDB headers */ #include "btr0btr.h" @@ -200,7 +201,6 @@ static char* innodb_large_prefix; stopword table to be used */ static char* innobase_server_stopword_table; -static my_bool innobase_use_atomic_writes; static my_bool innobase_use_checksums; static my_bool innobase_locks_unsafe_for_binlog; static my_bool innobase_rollback_on_timeout; @@ -1849,9 +1849,7 @@ thd_to_trx_id( return(thd_to_trx(thd)->id); } -static int -wsrep_abort_transaction(handlerton* hton, THD *bf_thd, THD *victim_thd, - my_bool signal); +static void wsrep_abort_transaction(handlerton*, THD *, THD *, my_bool); static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid); static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid); #endif /* WITH_WSREP */ @@ -2478,6 +2476,72 @@ innobase_raw_format( return(ut_str_sql_format(buf_tmp, buf_tmp_used, buf, buf_size)); } +/* +The helper function nlz(x) calculates the number of leading zeros +in the binary representation of the number "x", either using a +built-in compiler function or a substitute trick based on the use +of the multiplication operation and a table indexed by the prefix +of the multiplication result: +*/ +#ifdef __GNUC__ +#define nlz(x) __builtin_clzll(x) +#elif defined(_MSC_VER) && !defined(_M_CEE_PURE) && \ + (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64)) +#ifndef __INTRIN_H_ +#pragma warning(push, 4) +#pragma warning(disable: 4255 4668) +#include <intrin.h> +#pragma warning(pop) +#endif +__forceinline unsigned int nlz (ulonglong x) +{ +#if defined(_M_IX86) || defined(_M_X64) + unsigned long n; +#ifdef _M_X64 + _BitScanReverse64(&n, x); + return (unsigned int) n ^ 63; +#else + unsigned long y = (unsigned long) (x >> 32); + unsigned int m = 31; + if (y == 0) + { + y = (unsigned long) x; + m = 63; + } + _BitScanReverse(&n, y); + return (unsigned int) n ^ m; +#endif +#elif defined(_M_ARM64) + return _CountLeadingZeros(x); +#endif +} +#else +inline unsigned int nlz (ulonglong x) +{ + static 
unsigned char table [48] = { + 32, 6, 5, 0, 4, 12, 0, 20, + 15, 3, 11, 0, 0, 18, 25, 31, + 8, 14, 2, 0, 10, 0, 0, 0, + 0, 0, 0, 21, 0, 0, 19, 26, + 7, 0, 13, 0, 16, 1, 22, 27, + 9, 0, 17, 23, 28, 24, 29, 30 + }; + unsigned int y= (unsigned int) (x >> 32); + unsigned int n= 0; + if (y == 0) { + y= (unsigned int) x; + n= 32; + } + y = y | (y >> 1); // Propagate leftmost 1-bit to the right. + y = y | (y >> 2); + y = y | (y >> 4); + y = y | (y >> 8); + y = y & ~(y >> 16); + y = y * 0x3EF5D037; + return n + table[y >> 26]; +} +#endif + /*********************************************************************//** Compute the next autoinc value. @@ -2506,85 +2570,93 @@ innobase_next_autoinc( ulonglong max_value) /*!< in: max value for type */ { ulonglong next_value; - ulonglong block = need * step; + ulonglong block; /* Should never be 0. */ ut_a(need > 0); - ut_a(block > 0); + ut_a(step > 0); ut_a(max_value > 0); - /* - Allow auto_increment to go over max_value up to max ulonglong. - This allows us to detect that all values are exhausted. - If we don't do this, we will return max_value several times - and get duplicate key errors instead of auto increment value - out of range. - */ - max_value= (~(ulonglong) 0); + /* + We need to calculate the "block" value equal to the product + "step * need". However, when calculating this product, an integer + overflow can occur, so we cannot simply use the usual multiplication + operation. The snippet below calculates the product of two numbers + and detects an unsigned integer overflow: + */ + unsigned int m= nlz(need); + unsigned int n= nlz(step); + if (m + n <= 8 * sizeof(ulonglong) - 2) { + // The bit width of the original values is too large, + // therefore we are guaranteed to get an overflow. + goto overflow; + } + block = need * (step >> 1); + if ((longlong) block < 0) { + goto overflow; + } + block += block; + if (step & 1) { + block += need; + if (block < need) { + goto overflow; + } + } + + /* Check for overflow. Current can be > max_value if the value + is in reality a negative value. Also, the visual studio compiler + converts large double values (which hypothetically can then be + passed here as the values of the "current" parameter) automatically + into unsigned long long datatype maximum value: */ + if (current > max_value) { + goto overflow; + } /* According to MySQL documentation, if the offset is greater than the step then the offset is ignored. */ - if (offset > block) { + if (offset > step) { offset = 0; } - /* Check for overflow. Current can be > max_value if the value is - in reality a negative value.The visual studio compilers converts - large double values automatically into unsigned long long datatype - maximum value */ - - if (block >= max_value - || offset > max_value - || current >= max_value - || max_value - offset <= offset) { - - next_value = max_value; + /* + Let's round the current value to within a step-size block: + */ + if (current > offset) { + next_value = current - offset; } else { - ut_a(max_value > current); - - ulonglong free = max_value - current; - - if (free < offset || free - offset <= block) { - next_value = max_value; - } else { - next_value = 0; - } + next_value = offset - current; } + next_value -= next_value % step; - if (next_value == 0) { - ulonglong next; - - if (current > offset) { - next = (current - offset) / step; - } else { - next = (offset - current) / step; - } - - ut_a(max_value > next); - next_value = next * step; - /* Check for multiplication overflow. 
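As an aside, the rewritten overflow check above combines an nlz()-based width pre-test with a halve-and-double multiplication. A minimal standalone sketch of the same technique, assuming a GCC/Clang build (the patch itself also carries MSVC and table-based fallbacks); the function and variable names here are ours, not the patch's:

#include <cstdint>
#include <cstdio>

// Leading-zero count of a 64-bit value; both multiplication operands must
// be nonzero, which innobase_next_autoinc() asserts for need and step.
static inline unsigned nlz(uint64_t x) { return (unsigned) __builtin_clzll(x); }

// Compute a * b, returning false on unsigned 64-bit overflow.
static bool mul_u64(uint64_t a, uint64_t b, uint64_t *out)
{
    // Bit widths summing to 66 or more cannot fit in 64 bits: bail out early.
    if (nlz(a) + nlz(b) <= 8 * sizeof(uint64_t) - 2)
        return false;
    // Remaining boundary case (widths summing to exactly 65): multiply by
    // b/2, verify the doubling keeps the top bit clear, then add a once
    // more if b is odd, checking for wrap-around.
    uint64_t p = a * (b >> 1);
    if ((int64_t) p < 0)
        return false;
    p += p;
    if (b & 1) {
        p += a;
        if (p < a)
            return false;
    }
    *out = p;
    return true;
}

int main()
{
    uint64_t r;
    printf("%d\n", mul_u64(3, 5, &r) && r == 15); // 1
    printf("%d\n", mul_u64(~0ULL, 2, &r));        // 0: overflow detected
}

The width pre-test alone settles every case except a width sum of exactly 65 bits, which is what the halve-and-double path resolves.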
*/ - ut_a(next_value >= next); - ut_a(max_value > next_value); - - /* Check for overflow */ - if (max_value - next_value >= block) { - - next_value += block; - - if (max_value - next_value >= offset) { - next_value += offset; - } else { - next_value = max_value; - } - } else { - next_value = max_value; - } + /* + Add an offset to the next value and check that the addition + does not cause an integer overflow: + */ + next_value += offset; + if (next_value < offset) { + goto overflow; } - ut_a(next_value != 0); - ut_a(next_value <= max_value); + /* + Add a block to the next value and check that the addition + does not cause an integer overflow: + */ + next_value += block; + if (next_value < block) { + goto overflow; + } return(next_value); + +overflow: + /* + Allow auto_increment to go over max_value up to max ulonglong. + This allows us to detect that all values are exhausted. + If we don't do this, we will return max_value several times + and get duplicate key errors instead of auto increment value + out of range: + */ + return(~(ulonglong) 0); } /********************************************************************//** @@ -3473,10 +3545,12 @@ ha_innobase::init_table_handle_for_HANDLER(void) reset_template(); } -/** Free tablespace resources allocated. */ -void innobase_space_shutdown() +/*********************************************************************//** +Free any resources that were allocated and return failure. +@return always return 1 */ +static int innodb_init_abort() { - DBUG_ENTER("innobase_space_shutdown"); + DBUG_ENTER("innodb_init_abort"); if (fil_system.temp_space) { fil_system.temp_space->close(); @@ -3491,19 +3565,21 @@ void innobase_space_shutdown() #ifdef WITH_INNODB_DISALLOW_WRITES os_event_destroy(srv_allow_writes_event); #endif /* WITH_INNODB_DISALLOW_WRITES */ - - DBUG_VOID_RETURN; -} - -/** Free any resources that were allocated and return failure. -@return always return 1 */ -static int innodb_init_abort() -{ - DBUG_ENTER("innodb_init_abort"); - innobase_space_shutdown(); DBUG_RETURN(1); } +/** Deprecation message about innodb_idle_flush_pct */ +static const char* deprecated_idle_flush_pct + = "innodb_idle_flush_pct is DEPRECATED and has no effect."; + +static const char* deprecated_innodb_checksum_algorithm + = "Setting innodb_checksum_algorithm to values other than" + " crc32, full_crc32, strict_crc32 or strict_full_crc32" + " is UNSAFE and DEPRECATED." + " These deprecated values will be disallowed in MariaDB 10.6."; + +static ulong innodb_idle_flush_pct; + /** If applicable, emit a message that log checksums cannot be disabled. @param[in,out] thd client session, or NULL if at startup @param[in] check whether redo log block checksums are enabled @@ -3530,6 +3606,23 @@ innodb_log_checksums_func_update(THD* thd, bool check) return(check); } +static void innodb_checksum_algorithm_update(THD *thd, st_mysql_sys_var*, + void *, const void *save) +{ + srv_checksum_algorithm= *static_cast<const ulong*>(save); + switch (srv_checksum_algorithm) { + case SRV_CHECKSUM_ALGORITHM_CRC32: + case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: + case SRV_CHECKSUM_ALGORITHM_FULL_CRC32: + case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32: + break; + default: + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_UNSUPPORTED, + deprecated_innodb_checksum_algorithm); + } +} + /****************************************************************//** Gives the file extension of an InnoDB single-table tablespace. 
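Returning to innobase_next_autoinc() above: once block is known, the new code aligns the current value down to a whole number of steps relative to offset, then adds the offset and the reserved block back. A simplified model that omits the overflow gotos (names are ours, not the patch's):

#include <cstdint>
#include <cstdio>

// Simplified model of the rewritten innobase_next_autoinc(); all overflow
// paths are omitted, so inputs are assumed small.
static uint64_t next_autoinc(uint64_t current, uint64_t need,
                             uint64_t step, uint64_t offset)
{
    if (offset > step)
        offset = 0;                 // offsets larger than the step are ignored
    uint64_t block = need * step;   // the overflow-checked product from above
    uint64_t next = current > offset ? current - offset : offset - current;
    next -= next % step;            // round down to a step boundary
    return next + offset + block;
}

int main()
{
    // current = 7, need = 1, step = 5, offset = 2:
    // 7 - 2 = 5; 5 - 5 % 5 = 5; 5 + 2 + 5 = 12, the next value that is
    // congruent to the offset modulo the step.
    printf("%llu\n", (unsigned long long) next_autoinc(7, 1, 5, 2));
}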
*/ static const char* ha_innobase_exts[] = { @@ -3954,9 +4047,18 @@ static int innodb_init_params() if (!innobase_use_checksums) { ib::warn() << "Setting innodb_checksums to OFF is DEPRECATED." - " This option may be removed in future releases. You" - " should set innodb_checksum_algorithm=NONE instead."; + " This option was removed in MariaDB 10.5."; srv_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_NONE; + } else { + switch (srv_checksum_algorithm) { + case SRV_CHECKSUM_ALGORITHM_CRC32: + case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: + case SRV_CHECKSUM_ALGORITHM_FULL_CRC32: + case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32: + break; + default: + ib::warn() << deprecated_innodb_checksum_algorithm; + } } innodb_log_checksums = innodb_log_checksums_func_update( @@ -4010,21 +4112,16 @@ static int innodb_init_params() innobase_commit_concurrency_init_default(); - srv_use_atomic_writes - = innobase_use_atomic_writes && my_may_have_atomic_write; - if (srv_use_atomic_writes && !srv_file_per_table) - { - fprintf(stderr, "InnoDB: Disabling atomic_writes as file_per_table is not used.\n"); - srv_use_atomic_writes= 0; - } + if (innodb_idle_flush_pct != 100) { + ib::warn() << deprecated_idle_flush_pct; + } - if (srv_use_atomic_writes) { - fprintf(stderr, "InnoDB: using atomic writes.\n"); +#ifndef _WIN32 + if (srv_use_atomic_writes && my_may_have_atomic_write) { /* Force O_DIRECT on Unixes (on Windows writes are always unbuffered) */ -#ifndef _WIN32 switch (innodb_flush_method) { case SRV_O_DIRECT: case SRV_O_DIRECT_NO_FSYNC: @@ -4033,8 +4130,8 @@ static int innodb_init_params() innodb_flush_method = SRV_O_DIRECT; fprintf(stderr, "InnoDB: using O_DIRECT due to atomic writes.\n"); } -#endif } +#endif if (srv_read_only_mode) { ib::info() << "Started in read only mode"; @@ -4122,8 +4219,10 @@ static int innodb_init(void* p) innobase_hton->flush_logs = innobase_flush_logs; innobase_hton->show_status = innobase_show_status; innobase_hton->flags = - HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS - | HTON_NATIVE_SYS_VERSIONING | HTON_WSREP_REPLICATION; + HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS | + HTON_NATIVE_SYS_VERSIONING | + HTON_WSREP_REPLICATION | + HTON_REQUIRES_CLOSE_AFTER_TRUNCATE; #ifdef WITH_WSREP innobase_hton->abort_transaction=wsrep_abort_transaction; @@ -4309,7 +4408,6 @@ innobase_end(handlerton*, ha_panic_function) } innodb_shutdown(); - innobase_space_shutdown(); mysql_mutex_destroy(&commit_cond_m); mysql_cond_destroy(&commit_cond); @@ -5037,6 +5135,7 @@ innobase_close_connection( if (trx) { + thd_set_ha_data(thd, hton, NULL); if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { sql_print_error("Transaction not registered for MariaDB 2PC, " @@ -5075,7 +5174,7 @@ rollback_and_free: DBUG_RETURN(0); } -UNIV_INTERN void lock_cancel_waiting_and_release(lock_t* lock); +void lock_cancel_waiting_and_release(lock_t *lock); /** Cancel any pending lock request associated with the current THD. 
@sa THD::awake() @sa ha_kill_query() */ @@ -5085,6 +5184,7 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels) if (trx_t* trx= thd_to_trx(thd)) { + ut_ad(trx->mysql_thd == thd); #ifdef WITH_WSREP if (trx->is_wsrep() && wsrep_thd_is_aborting(thd)) /* if victim has been signaled by BF thread and/or aborting is already @@ -5093,28 +5193,15 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels) DBUG_VOID_RETURN; #endif /* WITH_WSREP */ lock_mutex_enter(); - mutex_enter(&trx_sys.mutex); - trx_mutex_enter(trx); - /* It is possible that innobase_close_connection() is concurrently - being executed on our victim. Even if the trx object is later - reused for another client connection or a background transaction, - its trx->mysql_thd will differ from our thd. - - trx_t::state changes are protected by trx_t::mutex, and - trx_sys.trx_list is protected by trx_sys.mutex, in - both trx_create() and trx_t::free(). - - At this point, trx may have been reallocated for another client - connection, or for a background operation. In that case, either - trx_t::state or trx_t::mysql_thd should not match our expectations. */ - bool cancel= trx->mysql_thd == thd && trx->state == TRX_STATE_ACTIVE && - !trx->lock.was_chosen_as_deadlock_victim; - mutex_exit(&trx_sys.mutex); - if (!cancel); - else if (lock_t *lock= trx->lock.wait_lock) + if (lock_t *lock= trx->lock.wait_lock) + { + trx_mutex_enter(trx); + if (trx->is_wsrep() && wsrep_thd_is_aborting(thd)) + trx->lock.was_chosen_as_deadlock_victim= TRUE; lock_cancel_waiting_and_release(lock); + trx_mutex_exit(trx); + } lock_mutex_exit(); - trx_mutex_exit(trx); } DBUG_VOID_RETURN; @@ -6108,13 +6195,6 @@ ha_innobase::open(const char* name, int, uint) innobase_copy_frm_flags_from_table_share(ib_table, table->s); - /* No point to init any statistics if tablespace is still encrypted. */ - if (ib_table->is_readable()) { - dict_stats_init(ib_table); - } else { - ib_table->stat_initialized = 1; - } - MONITOR_INC(MONITOR_TABLE_OPEN); if ((ib_table->flags2 & DICT_TF2_DISCARDED)) { @@ -6307,11 +6387,14 @@ ha_innobase::open(const char* name, int, uint) } } - if (table && m_prebuilt->table) { - ut_ad(table->versioned() == m_prebuilt->table->versioned()); + ut_ad(!m_prebuilt->table + || table->versioned() == m_prebuilt->table->versioned()); + + if (!THDVAR(thd, background_thread)) { + info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST + | HA_STATUS_OPEN); } - info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST | HA_STATUS_OPEN); DBUG_RETURN(0); } @@ -6520,8 +6603,8 @@ wsrep_innobase_mysql_sort( case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_VARCHAR: { - uchar tmp_str[REC_VERSION_56_MAX_INDEX_COL_LEN] = {'\0'}; - uint tmp_length = REC_VERSION_56_MAX_INDEX_COL_LEN; + uchar *tmp_str; + uint tmp_length; /* Use the charset number to pick the right charset struct for the comparison. 
Since the MySQL function get_charset may be @@ -6544,7 +6627,11 @@ wsrep_innobase_mysql_sort( } } - ut_a(str_length <= tmp_length); + // Note that strnxfrm may change length of string + tmp_length= charset->coll->strnxfrmlen(charset, str_length); + tmp_length= ut_max(str_length, tmp_length) + 1; + tmp_str= static_cast<uchar *>(ut_malloc_nokey(tmp_length)); + ut_ad(str_length <= tmp_length); memcpy(tmp_str, str, str_length); tmp_length = charset->coll->strnxfrm(charset, str, str_length, @@ -6568,6 +6655,7 @@ wsrep_innobase_mysql_sort( ret_length = tmp_length; } + ut_free(tmp_str); break; } case MYSQL_TYPE_DECIMAL : @@ -6919,7 +7007,7 @@ wsrep_store_key_val_for_row( THD* thd, TABLE* table, uint keynr, /*!< in: key number */ - char* buff, /*!< in/out: buffer for the key value (in MySQL + uchar* buff, /*!< in/out: buffer for the key value (in MySQL format) */ uint buff_len,/*!< in: buffer length */ const uchar* record, @@ -6928,7 +7016,7 @@ wsrep_store_key_val_for_row( KEY* key_info = table->key_info + keynr; KEY_PART_INFO* key_part = key_info->key_part; KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts; - char* buff_start = buff; + uchar* buff_start = buff; enum_field_types mysql_type; Field* field; uint buff_space = buff_len; @@ -6940,7 +7028,8 @@ wsrep_store_key_val_for_row( for (; key_part != end; key_part++) { - uchar sorted[REC_VERSION_56_MAX_INDEX_COL_LEN] = {'\0'}; + uchar *sorted=NULL; + uint max_len=0; ibool part_is_null = FALSE; if (key_part->null_bit) { @@ -7019,10 +7108,14 @@ wsrep_store_key_val_for_row( true_len = key_len; } + max_len= true_len; + sorted= static_cast<uchar *>(ut_malloc_nokey(max_len+1)); memcpy(sorted, data, true_len); true_len = wsrep_innobase_mysql_sort( mysql_type, cs->number, sorted, true_len, - REC_VERSION_56_MAX_INDEX_COL_LEN); + max_len); + ut_ad(true_len <= max_len); + if (wsrep_protocol_version > 1) { /* Note that we always reserve the maximum possible length of the true VARCHAR in the key value, though @@ -7107,11 +7200,13 @@ wsrep_store_key_val_for_row( true_len = key_len; } + max_len= true_len; + sorted= static_cast<uchar *>(ut_malloc_nokey(max_len+1)); memcpy(sorted, blob_data, true_len); true_len = wsrep_innobase_mysql_sort( mysql_type, cs->number, sorted, true_len, - REC_VERSION_56_MAX_INDEX_COL_LEN); - + max_len); + ut_ad(true_len <= max_len); /* Note that we always reserve the maximum possible length of the BLOB prefix in the key value. */ @@ -7187,10 +7282,14 @@ wsrep_store_key_val_for_row( cs->mbmaxlen), &error); } + + max_len= true_len; + sorted= static_cast<uchar *>(ut_malloc_nokey(max_len+1)); memcpy(sorted, src_start, true_len); true_len = wsrep_innobase_mysql_sort( mysql_type, cs->number, sorted, true_len, - REC_VERSION_56_MAX_INDEX_COL_LEN); + max_len); + ut_ad(true_len <= max_len); if (true_len > buff_space) { fprintf (stderr, @@ -7205,6 +7304,11 @@ wsrep_store_key_val_for_row( buff += true_len; buff_space -= true_len; } + + if (sorted) { + ut_free(sorted); + sorted= NULL; + } } ut_a(buff <= buff_start + buff_len); @@ -8014,7 +8118,6 @@ ha_innobase::write_row( /* Handling of errors related to auto-increment. */ if (auto_inc_used) { ulonglong auto_inc; - ulonglong col_max_value; /* Note the number of rows processed for this statement, used by get_auto_increment() to determine the number of AUTO-INC @@ -8024,11 +8127,6 @@ ha_innobase::write_row( --trx->n_autoinc_rows; } - /* We need the upper limit of the col type to check for - whether we update the table autoinc counter or not. 
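A note on the wsrep_innobase_mysql_sort() hunk above: a strnxfrm() sort key can be longer than its source string, so the fixed REC_VERSION_56_MAX_INDEX_COL_LEN stack buffer is replaced by a heap buffer sized from strnxfrmlen(). A sketch of that sizing rule with a stand-in for the charset callback (the doubling factor is illustrative only; the real expansion is charset-specific):

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Stand-in for charset->coll->strnxfrmlen(): an upper bound on the length
// of the transformed sort key for a src_len-byte source.
static size_t strnxfrmlen_stub(size_t src_len) { return 2 * src_len; }

int main()
{
    const char src[] = "abc";
    size_t src_len = sizeof(src) - 1;
    // max(source length, transformed length) + 1, as in the patch
    size_t buf_len = std::max(src_len, strnxfrmlen_stub(src_len)) + 1;
    unsigned char *buf = (unsigned char *) malloc(buf_len);
    memcpy(buf, src, src_len);
    // charset->coll->strnxfrm() would write the sort key into buf here
    printf("allocated %zu bytes for a %zu-byte source\n", buf_len, src_len);
    free(buf);
}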
*/ - col_max_value = - table->next_number_field->get_max_int_value(); - /* Get the value that MySQL attempted to store in the table.*/ auto_inc = table->next_number_field->val_uint(); @@ -8095,36 +8193,25 @@ ha_innobase::write_row( if (auto_inc >= m_prebuilt->autoinc_last_value) { set_max_autoinc: + /* We need the upper limit of the col type to check for + whether we update the table autoinc counter or not. */ + ulonglong col_max_value = + table->next_number_field->get_max_int_value(); + /* This should filter out the negative values set explicitly by the user. */ if (auto_inc <= col_max_value) { + ut_ad(m_prebuilt->autoinc_increment > 0); ulonglong offset; ulonglong increment; dberr_t err; -#ifdef WITH_WSREP - /* Applier threads which are processing - ROW events and don't go through server - level autoinc processing, therefore - m_prebuilt autoinc values don't get - properly assigned. Fetch values from - server side. */ - if (trx->is_wsrep() && - wsrep_thd_is_applying(m_user_thd)) - { - wsrep_thd_auto_increment_variables( - m_user_thd, &offset, &increment); - } - else -#endif /* WITH_WSREP */ - { - ut_a(m_prebuilt->autoinc_increment > 0); - offset = m_prebuilt->autoinc_offset; - increment = m_prebuilt->autoinc_increment; - } + + offset = m_prebuilt->autoinc_offset; + increment = m_prebuilt->autoinc_increment; + auto_inc = innobase_next_autoinc( - auto_inc, - 1, increment, offset, + auto_inc, 1, increment, offset, col_max_value); err = innobase_set_max_autoinc( @@ -8652,6 +8739,8 @@ wsrep_calc_row_hash( for (uint i = 0; i < table->s->fields; i++) { byte null_byte=0; byte true_byte=1; + ulint col_type; + ulint is_unsigned; const Field* field = table->field[i]; if (!field->stored_in_db()) { @@ -8660,8 +8749,9 @@ wsrep_calc_row_hash( ptr = (const byte*) row + get_field_offset(table, field); len = field->pack_length(); + col_type = get_innobase_type_from_mysql_type(&is_unsigned, field); - switch (prebuilt->table->cols[i].mtype) { + switch (col_type) { case DATA_BLOB: ptr = row_mysql_read_blob_ref(&len, ptr, len); @@ -8779,6 +8869,20 @@ ha_innobase::update_row( MySQL that the row is not really updated and it should not increase the count of updated rows. This is fix for http://bugs.mysql.com/29157 */ + if (m_prebuilt->versioned_write + && thd_sql_command(m_user_thd) != SQLCOM_ALTER_TABLE + /* Multiple UPDATE of same rows in single transaction create + historical rows only once. */ + && trx->id != table->vers_start_id()) { + error = row_insert_for_mysql((byte*) old_row, + m_prebuilt, + ROW_INS_HISTORICAL); + if (error != DB_SUCCESS) { + goto func_exit; + } + innobase_srv_conc_exit_innodb(m_prebuilt); + innobase_active_small(); + } DBUG_RETURN(HA_ERR_RECORD_IS_THE_SAME); } else { const bool vers_set_fields = m_prebuilt->versioned_write @@ -8812,39 +8916,37 @@ ha_innobase::update_row( /* A value for an AUTO_INCREMENT column was specified in the UPDATE statement. */ - ulonglong offset; - ulonglong increment; -#ifdef WITH_WSREP - /* Applier threads which are processing - ROW events and don't go through server - level autoinc processing, therefore - m_prebuilt autoinc values don't get - properly assigned. Fetch values from - server side. 
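On the system-versioning hunk above: even when an UPDATE leaves the row bytes unchanged, a versioned table must still record history, but only once per transaction; comparing the transaction id with the row's stored start id is what deduplicates repeated updates of the same row. A toy model of that guard (stand-in types, not InnoDB's):

#include <cstdint>
#include <cstdio>

// Once a transaction rewrites a row, its id becomes the row's start id,
// so a second same-row UPDATE in that transaction needs no history row.
struct row { uint64_t vers_start_id; };

static bool need_history_row(const row &r, uint64_t trx_id)
{
    return trx_id != r.vers_start_id;
}

int main()
{
    row r = { 41 };
    printf("%d\n", need_history_row(r, 42)); // 1: first touch by trx 42
    r.vers_start_id = 42;                    // trx 42 has rewritten the row
    printf("%d\n", need_history_row(r, 42)); // 0: history already recorded
}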
*/ - if (trx->is_wsrep() && wsrep_thd_is_applying(m_user_thd)) - wsrep_thd_auto_increment_variables( - m_user_thd, &offset, &increment); - else -#endif /* WITH_WSREP */ - offset = m_prebuilt->autoinc_offset, - increment = m_prebuilt->autoinc_increment; - - autoinc = innobase_next_autoinc( - autoinc, 1, increment, offset, - table->found_next_number_field->get_max_int_value()); - - error = innobase_set_max_autoinc(autoinc); - - if (m_prebuilt->table->persistent_autoinc) { - /* Update the PAGE_ROOT_AUTO_INC. Yes, we do - this even if dict_table_t::autoinc already was - greater than autoinc, because we cannot know - if any INSERT actually used (and wrote to - PAGE_ROOT_AUTO_INC) a value bigger than our - autoinc. */ - btr_write_autoinc(dict_table_get_first_index( - m_prebuilt->table), - autoinc); + /* We need the upper limit of the col type to check for + whether we update the table autoinc counter or not. */ + ulonglong col_max_value = + table->found_next_number_field->get_max_int_value(); + + /* This should filter out the negative + values set explicitly by the user. */ + if (autoinc <= col_max_value) { + ulonglong offset; + ulonglong increment; + + offset = m_prebuilt->autoinc_offset; + increment = m_prebuilt->autoinc_increment; + + autoinc = innobase_next_autoinc( + autoinc, 1, increment, offset, + col_max_value); + + error = innobase_set_max_autoinc(autoinc); + + if (m_prebuilt->table->persistent_autoinc) { + /* Update the PAGE_ROOT_AUTO_INC. Yes, we do + this even if dict_table_t::autoinc already was + greater than autoinc, because we cannot know + if any INSERT actually used (and wrote to + PAGE_ROOT_AUTO_INC) a value bigger than our + autoinc. */ + btr_write_autoinc(dict_table_get_first_index( + m_prebuilt->table), + autoinc); + } } } @@ -10295,7 +10397,7 @@ wsrep_append_key( THD *thd, trx_t *trx, TABLE_SHARE *table_share, - const char* key, + const uchar* key, uint16_t key_len, Wsrep_service_key_type key_type /*!< in: access type of this key (shared, exclusive, semi...) */ @@ -10407,8 +10509,8 @@ ha_innobase::wsrep_append_keys( if (wsrep_protocol_version == 0) { uint len; - char keyval[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; - char *key = &keyval[0]; + uchar keyval[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; + uchar *key = &keyval[0]; ibool is_null; len = wsrep_store_key_val_for_row( @@ -10449,12 +10551,12 @@ ha_innobase::wsrep_append_keys( /* keyval[] shall contain an ordinal number at byte 0 and the actual key data shall be written at byte 1. 
Hence the total data length is the key length + 1 */ - char keyval0[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; - char keyval1[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'}; - keyval0[0] = (char)i; - keyval1[0] = (char)i; - char* key0 = &keyval0[1]; - char* key1 = &keyval1[1]; + uchar keyval0[WSREP_MAX_SUPPORTED_KEY_LENGTH+1]= {'\0'}; + uchar keyval1[WSREP_MAX_SUPPORTED_KEY_LENGTH+1]= {'\0'}; + keyval0[0] = (uchar)i; + keyval1[0] = (uchar)i; + uchar* key0 = &keyval0[1]; + uchar* key1 = &keyval1[1]; if (!tab) { WSREP_WARN("MariaDB-InnoDB key mismatch %s %s", @@ -10528,22 +10630,20 @@ ha_innobase::wsrep_append_keys( /* if no PK, calculate hash of full row, to be the key value */ if (!key_appended && wsrep_certify_nonPK) { uchar digest[16]; - int rcode; wsrep_calc_row_hash(digest, record0, table, m_prebuilt); - if ((rcode = wsrep_append_key(thd, trx, table_share, - (const char*) digest, 16, - key_type))) { + if (int rcode = wsrep_append_key(thd, trx, table_share, + digest, 16, key_type)) { DBUG_RETURN(rcode); } if (record1) { wsrep_calc_row_hash( digest, record1, table, m_prebuilt); - if ((rcode = wsrep_append_key(thd, trx, table_share, - (const char*) digest, - 16, key_type))) { + if (int rcode = wsrep_append_key(thd, trx, table_share, + digest, 16, + key_type)) { DBUG_RETURN(rcode); } } @@ -13333,17 +13433,10 @@ innobase_drop_database( @param[in,out] trx InnoDB data dictionary transaction @param[in] from old table name @param[in] to new table name -@param[in] commit whether to commit trx -@param[in] use_fk whether to parse and enforce FOREIGN KEY constraints +@param[in] commit whether to commit trx (and to enforce FOREIGN KEY) @return DB_SUCCESS or error code */ -inline -dberr_t -innobase_rename_table( - trx_t* trx, - const char* from, - const char* to, - bool commit, - bool use_fk) +inline dberr_t innobase_rename_table(trx_t *trx, const char *from, + const char *to, bool commit) { dberr_t error; char norm_to[FN_REFLEN]; @@ -13376,6 +13469,9 @@ innobase_rename_table( Convert lock_wait_timeout unit from second to 250 milliseconds */ long int lock_wait_timeout = thd_lock_wait_timeout(trx->mysql_thd) * 4; if (table != NULL) { + if (commit) { + dict_stats_wait_bg_to_stop_using_table(table, trx); + } for (dict_index_t* index = dict_table_get_first_index(table); index != NULL; index = dict_table_get_next_index(index)) { @@ -13389,7 +13485,9 @@ innobase_rename_table( } } } - dict_table_close(table, TRUE, FALSE); + if (!commit) { + dict_table_close(table, TRUE, FALSE); + } } /* FTS sync is in progress. 
We shall timeout this operation */ @@ -13399,7 +13497,7 @@ innobase_rename_table( } error = row_rename_table_for_mysql(norm_from, norm_to, trx, commit, - use_fk); + commit); if (error != DB_SUCCESS) { if (error == DB_TABLE_NOT_FOUND @@ -13451,6 +13549,10 @@ innobase_rename_table( func_exit: if (commit) { + if (table) { + table->stats_bg_flag &= ~BG_STAT_SHOULD_QUIT; + dict_table_close(table, TRUE, FALSE); + } row_mysql_unlock_data_dictionary(trx); } @@ -13505,9 +13607,11 @@ int ha_innobase::truncate() ++trx->will_lock; trx_set_dict_operation(trx, TRX_DICT_OP_TABLE); row_mysql_lock_data_dictionary(trx); + dict_stats_wait_bg_to_stop_using_table(ib_table, trx); + int err = convert_error_code_to_mysql( innobase_rename_table(trx, ib_table->name.m_name, temp_name, - false, false), + false), ib_table->flags, m_user_thd); if (err) { trx_rollback_for_mysql(trx); @@ -13590,7 +13694,7 @@ ha_innobase::rename_table( ++trx->will_lock; trx_set_dict_operation(trx, TRX_DICT_OP_INDEX); - dberr_t error = innobase_rename_table(trx, from, to, true, true); + dberr_t error = innobase_rename_table(trx, from, to, true); DEBUG_SYNC(thd, "after_innobase_rename_table"); @@ -14137,6 +14241,10 @@ ha_innobase::info_low( ib_table = m_prebuilt->table; DBUG_ASSERT(ib_table->get_ref_count() > 0); + if (!ib_table->is_readable()) { + ib_table->stat_initialized = true; + } + if (flag & HA_STATUS_TIME) { if (is_analyze || innobase_stats_on_metadata) { @@ -14148,6 +14256,13 @@ ha_innobase::info_low( if (dict_stats_is_persistent_enabled(ib_table)) { if (is_analyze) { + row_mysql_lock_data_dictionary( + m_prebuilt->trx); + dict_stats_recalc_pool_del(ib_table); + dict_stats_wait_bg_to_stop_using_table( + ib_table, m_prebuilt->trx); + row_mysql_unlock_data_dictionary( + m_prebuilt->trx); opt = DICT_STATS_RECALC_PERSISTENT; } else { /* This is e.g. 'SHOW INDEXES', fetch @@ -14160,6 +14275,13 @@ ha_innobase::info_low( ret = dict_stats_update(ib_table, opt); + if (opt == DICT_STATS_RECALC_PERSISTENT) { + mutex_enter(&dict_sys.mutex); + ib_table->stats_bg_flag + &= byte(~BG_STAT_SHOULD_QUIT); + mutex_exit(&dict_sys.mutex); + } + if (ret != DB_SUCCESS) { m_prebuilt->trx->op_info = ""; DBUG_RETURN(HA_ERR_GENERIC); @@ -14175,6 +14297,8 @@ ha_innobase::info_low( DBUG_EXECUTE_IF("dict_sys_mutex_avoid", goto func_exit;); + dict_stats_init(ib_table); + if (flag & HA_STATUS_VARIABLE) { ulint stat_clustered_index_size; @@ -15317,10 +15441,6 @@ ha_innobase::extra( case HA_EXTRA_END_ALTER_COPY: m_prebuilt->table->skip_alter_undo = 0; break; - case HA_EXTRA_FAKE_START_STMT: - trx_register_for_2pc(m_prebuilt->trx); - m_prebuilt->sql_stat_start = true; - break; default:/* Do nothing */ ; } @@ -17120,7 +17240,8 @@ innodb_io_capacity_update( " higher than innodb_io_capacity_max %lu", in_val, srv_max_io_capacity); - srv_max_io_capacity = in_val * 2; + srv_max_io_capacity = (in_val & ~(~0UL >> 1)) + ? in_val : in_val * 2; push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, @@ -18559,54 +18680,59 @@ static struct st_mysql_storage_engine innobase_storage_engine= #ifdef WITH_WSREP -/** This function is used to kill one transaction. - -This transaction was open on this node (not-yet-committed), and a -conflicting writeset from some other node that was being applied -caused a locking conflict. First committed (from other node) -wins, thus open transaction is rolled back. BF stands for -brute-force: any transaction can get aborted by galera any time -it is necessary. 
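The hunk that follows moves the actual kill off the lock-mutex path into a background manager task. A sketch of the id-based handoff it relies on, with stand-in types rather than the MariaDB API: only identifiers cross the thread boundary, and the worker re-resolves and re-validates them before acting, because nothing keeps the THD or trx alive in between:

#include <cstdint>
#include <cstdio>

struct trx { uint64_t thd_id, trx_id; bool active; };

// Stand-in registry; resolve() plays the role of find_thread_by_id()
// followed by thd_to_trx().
static trx registry[2] = { { 1, 100, true }, { 2, 101, false } };

static trx *resolve(uint64_t thd_id)
{
    for (trx &t : registry)
        if (t.thd_id == thd_id)
            return &t;
    return nullptr;
}

struct kill_arg { uint64_t thd_id, trx_id; };

static void bg_kill(void *void_arg) // would run on the manager thread
{
    kill_arg *arg = static_cast<kill_arg *>(void_arg);
    trx *victim = resolve(arg->thd_id);
    // act only if the slot still holds the same, still-active transaction
    if (victim && victim->trx_id == arg->trx_id && victim->active)
        printf("aborting trx %llu\n", (unsigned long long) victim->trx_id);
    delete arg;
}

int main()
{
    bg_kill(new kill_arg{ 1, 100 }); // aborts
    bg_kill(new kill_arg{ 2, 101 }); // skipped: no longer active
    bg_kill(new kill_arg{ 1, 999 }); // skipped: trx slot was reused
}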
+struct bg_wsrep_kill_trx_arg { + my_thread_id thd_id, bf_thd_id; + trx_id_t trx_id, bf_trx_id; + bool signal; +}; -This conflict can happen only when the replicated writeset (from -other node) is being applied, not when it’s waiting in the queue. -If our local transaction reached its COMMIT and this conflicting -writeset was in the queue, then it should fail the local -certification test instead. +/** Kill one transaction from a background manager thread -A brute force abort is only triggered by a locking conflict -between a writeset being applied by an applier thread (slave thread) -and an open transaction on the node, not by a Galera writeset -comparison as in the local certification failure. +wsrep_innobase_kill_one_trx() is invoked when lock_sys.mutex and trx mutex +are taken, wsrep_thd_bf_abort() cannot be used there as it takes THD mutexes +that must be taken before lock_sys.mutex and trx mutex. That's why +wsrep_innobase_kill_one_trx only posts the killing task to the manager thread +and the actual killing happens asynchronously here. -@param[in] bf_thd Brute force (BF) thread -@param[in,out] victim_trx Vimtim trx to be killed -@param[in] signal Should victim be signaled */ -UNIV_INTERN -int -wsrep_innobase_kill_one_trx( - THD* bf_thd, - trx_t *victim_trx, - bool signal) +As no mutexes were held we don't know whether THD or trx pointers are still +valid, so we need to pass thread/trx ids and perform a lookup. +*/ +static void bg_wsrep_kill_trx(void *void_arg) { - ut_ad(bf_thd); - ut_ad(victim_trx); - ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(victim_trx)); + bg_wsrep_kill_trx_arg *arg= (bg_wsrep_kill_trx_arg *)void_arg; + THD *thd, *bf_thd; + trx_t *victim_trx; + bool aborting= false; - DBUG_ENTER("wsrep_innobase_kill_one_trx"); + if ((bf_thd= find_thread_by_id(arg->bf_thd_id))) + wsrep_thd_LOCK(bf_thd); + if ((thd= find_thread_by_id(arg->thd_id))) + wsrep_thd_LOCK(thd); - THD *thd= (THD *) victim_trx->mysql_thd; - ut_ad(thd); - /* Note that bf_trx might not exist here e.g. on MDL conflict - case (test: galera_concurrent_ctas). Similarly, BF thread - could be also acquiring MDL-lock causing victim to be - aborted. However, we have not yet called innobase_trx_init() - for BF transaction (test: galera_many_columns)*/ - trx_t* bf_trx= thd_to_trx(bf_thd); - DBUG_ASSERT(wsrep_on(bf_thd)); + if (!thd || !bf_thd || !(victim_trx= thd_to_trx(thd))) + goto ret0; + + lock_mutex_enter(); + trx_mutex_enter(victim_trx); + if (victim_trx->id != arg->trx_id + || victim_trx->state == TRX_STATE_COMMITTED_IN_MEMORY) + { + /* Apparently victim trx was meanwhile rolled back or + committed. Tell bf thd not to wait, in case it already + started to. */ + trx_t *trx= thd_to_trx(bf_thd); + if (!trx) { + /* bf_thd might not be associated with a + transaction, in case of MDL conflict */ + } else if (lock_t *lock = trx->lock.wait_lock) { + trx_mutex_enter(trx); + lock_cancel_waiting_and_release(lock); + trx_mutex_exit(trx); + } + goto ret1; + } - wsrep_thd_LOCK(thd); + DBUG_ASSERT(wsrep_on(bf_thd)); WSREP_LOG_CONFLICT(bf_thd, thd, TRUE); @@ -18614,7 +18740,7 @@ wsrep_innobase_kill_one_trx( "seqno: %lld client_state: %s client_mode: %s transaction_mode: %s " "query: %s", wsrep_thd_is_BF(bf_thd, false) ? "BF" : "normal", - bf_trx ? 
bf_trx->id : TRX_ID_MAX, + arg->bf_trx_id, thd_get_thread_id(bf_thd), wsrep_thd_trx_seqno(bf_thd), wsrep_thd_client_state_str(bf_thd), @@ -18639,30 +18765,86 @@ wsrep_innobase_kill_one_trx( if (wsrep_thd_set_wsrep_aborter(bf_thd, thd)) { WSREP_DEBUG("innodb kill transaction skipped due to wsrep_aborter set"); - wsrep_thd_UNLOCK(thd); - DBUG_RETURN(0); + goto ret1; } - /* Note that we need to release this as it will be acquired - below in wsrep-lib */ - wsrep_thd_UNLOCK(thd); - DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); + aborting= true; - if (wsrep_thd_bf_abort(bf_thd, thd, signal)) - { - lock_t* wait_lock = victim_trx->lock.wait_lock; - if (wait_lock) { - DBUG_ASSERT(victim_trx->is_wsrep()); - WSREP_DEBUG("victim has wait flag: %lu", - thd_get_thread_id(thd)); - - WSREP_DEBUG("canceling wait lock"); - victim_trx->lock.was_chosen_as_deadlock_victim= TRUE; - lock_cancel_waiting_and_release(wait_lock); +ret1: + trx_mutex_exit(victim_trx); + lock_mutex_exit(); +ret0: + if (thd) { + wsrep_thd_UNLOCK(thd); + if (aborting) { + DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); + wsrep_thd_bf_abort(bf_thd, thd, arg->signal); } + wsrep_thd_kill_UNLOCK(thd); + } + if (bf_thd) { + wsrep_thd_UNLOCK(bf_thd); + wsrep_thd_kill_UNLOCK(bf_thd); } + free(arg); +} - DBUG_RETURN(0); +/** This function is used to kill one transaction. + +This transaction was open on this node (not-yet-committed), and a +conflicting writeset from some other node that was being applied +caused a locking conflict. First committed (from other node) +wins, thus open transaction is rolled back. BF stands for +brute-force: any transaction can get aborted by galera any time +it is necessary. + +This conflict can happen only when the replicated writeset (from +other node) is being applied, not when it’s waiting in the queue. +If our local transaction reached its COMMIT and this conflicting +writeset was in the queue, then it should fail the local +certification test instead. + +A brute force abort is only triggered by a locking conflict +between a writeset being applied by an applier thread (slave thread) +and an open transaction on the node, not by a Galera writeset +comparison as in the local certification failure. + +@param[in] bf_thd Brute force (BF) thread +@param[in,out] victim_trx Transaction to be killed +@param[in] signal Should victim be signaled */ +void +wsrep_innobase_kill_one_trx( + THD* bf_thd, + trx_t *victim_trx, + bool signal) +{ + ut_ad(bf_thd); + ut_ad(victim_trx); + ut_ad(lock_mutex_own()); + ut_ad(trx_mutex_own(victim_trx)); + + DBUG_ENTER("wsrep_innobase_kill_one_trx"); + + DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", + { + const char act[]= + "now " + "SIGNAL sync.before_wsrep_thd_abort_reached " + "WAIT_FOR signal.before_wsrep_thd_abort"; + DBUG_ASSERT(!debug_sync_set_action(bf_thd, + STRING_WITH_LEN(act))); + };); + + trx_t* bf_trx= thd_to_trx(bf_thd); + bg_wsrep_kill_trx_arg *arg = (bg_wsrep_kill_trx_arg*)malloc(sizeof(*arg)); + arg->thd_id = thd_get_thread_id(victim_trx->mysql_thd); + arg->trx_id = victim_trx->id; + arg->bf_thd_id = thd_get_thread_id(bf_thd); + arg->bf_trx_id = bf_trx ? bf_trx->id : TRX_ID_MAX; + arg->signal = signal; + mysql_manager_submit(bg_wsrep_kill_trx, arg); + + DBUG_VOID_RETURN; } /** This function forces the victim transaction to abort. 
Aborting the @@ -18675,14 +18857,14 @@ wsrep_innobase_kill_one_trx( @return -1 victim thread was aborted (no transaction) */ static -int +void wsrep_abort_transaction( handlerton*, THD *bf_thd, THD *victim_thd, my_bool signal) { - DBUG_ENTER("wsrep_innobase_abort_thd"); + DBUG_ENTER("wsrep_abort_transaction"); ut_ad(bf_thd); ut_ad(victim_thd); @@ -18696,17 +18878,47 @@ wsrep_abort_transaction( if (victim_trx) { lock_mutex_enter(); trx_mutex_enter(victim_trx); - int rcode= wsrep_innobase_kill_one_trx(bf_thd, - victim_trx, signal); + victim_trx->lock.was_chosen_as_wsrep_victim= true; trx_mutex_exit(victim_trx); lock_mutex_exit(); + + wsrep_thd_kill_LOCK(victim_thd); + wsrep_thd_LOCK(victim_thd); + bool aborting= !wsrep_thd_set_wsrep_aborter(bf_thd, victim_thd); + wsrep_thd_UNLOCK(victim_thd); + if (aborting) { + DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); + DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", + { + const char act[]= + "now " + "SIGNAL sync.before_wsrep_thd_abort_reached " + "WAIT_FOR signal.before_wsrep_thd_abort"; + DBUG_ASSERT(!debug_sync_set_action(bf_thd, + STRING_WITH_LEN(act))); + };); + wsrep_thd_bf_abort(bf_thd, victim_thd, signal); + } + wsrep_thd_kill_UNLOCK(victim_thd); + wsrep_srv_conc_cancel_wait(victim_trx); - DBUG_RETURN(rcode); + DBUG_VOID_RETURN; } else { + DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", + { + const char act[]= + "now " + "SIGNAL sync.before_wsrep_thd_abort_reached " + "WAIT_FOR signal.before_wsrep_thd_abort"; + DBUG_ASSERT(!debug_sync_set_action(bf_thd, + STRING_WITH_LEN(act))); + };); + wsrep_thd_kill_LOCK(victim_thd); wsrep_thd_bf_abort(bf_thd, victim_thd, signal); + wsrep_thd_kill_UNLOCK(victim_thd); } - DBUG_RETURN(-1); + DBUG_VOID_RETURN; } static @@ -18741,6 +18953,14 @@ innobase_wsrep_get_checkpoint( } #endif /* WITH_WSREP */ +static void innodb_idle_flush_pct_update(THD *thd, st_mysql_sys_var *var, + void*, const void *save) +{ + innodb_idle_flush_pct = *static_cast<const ulong*>(save); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_WRONG_COMMAND, deprecated_idle_flush_pct); +} + /* plugin options */ static MYSQL_SYSVAR_ENUM(checksum_algorithm, srv_checksum_algorithm, @@ -18769,7 +18989,7 @@ static MYSQL_SYSVAR_ENUM(checksum_algorithm, srv_checksum_algorithm, " Files updated when this option is set to crc32 or strict_crc32 will" " not be readable by MariaDB versions older than 10.0.4;" " new files created with full_crc32 are readable by MariaDB 10.4.3+", - NULL, NULL, SRV_CHECKSUM_ALGORITHM_CRC32, + NULL, innodb_checksum_algorithm_update, SRV_CHECKSUM_ALGORITHM_CRC32, &innodb_checksum_algorithm_typelib); static MYSQL_SYSVAR_BOOL(log_checksums, innodb_log_checksums, @@ -18796,12 +19016,10 @@ static MYSQL_SYSVAR_BOOL(doublewrite, srv_use_doublewrite_buf, " Disable with --skip-innodb-doublewrite.", NULL, NULL, TRUE); -static MYSQL_SYSVAR_BOOL(use_atomic_writes, innobase_use_atomic_writes, +static MYSQL_SYSVAR_BOOL(use_atomic_writes, srv_use_atomic_writes, PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, "Enable atomic writes, instead of using the doublewrite buffer, for files " "on devices that supports atomic writes. " - "To use this option one must use " - "innodb_file_per_table=1, innodb_flush_method=O_DIRECT. 
" "This option only works on Linux with either FusionIO cards using " "the directFS filesystem or with Shannon cards using any file system.", NULL, NULL, TRUE); @@ -18830,12 +19048,10 @@ static MYSQL_SYSVAR_ULONG(io_capacity_max, srv_max_io_capacity, SRV_MAX_IO_CAPACITY_DUMMY_DEFAULT, 100, SRV_MAX_IO_CAPACITY_LIMIT, 0); -static MYSQL_SYSVAR_ULONG(idle_flush_pct, - srv_idle_flush_pct, +static MYSQL_SYSVAR_ULONG(idle_flush_pct, innodb_idle_flush_pct, PLUGIN_VAR_RQCMDARG, - "Up to what percentage of dirty pages should be flushed when innodb " - "finds it has spare resources to do so.", - NULL, NULL, 100, 0, 100, 0); + "DEPRECATED. This setting has no effect.", + NULL, innodb_idle_flush_pct_update, 100, 0, 100, 0); #ifdef UNIV_DEBUG static MYSQL_SYSVAR_BOOL(background_drop_list_empty, @@ -19813,11 +20029,6 @@ static MYSQL_SYSVAR_BOOL(master_thread_disabled_debug, PLUGIN_VAR_OPCMDARG, "Disable master thread", NULL, srv_master_thread_disabled_debug_update, FALSE); - -static MYSQL_SYSVAR_UINT(simulate_comp_failures, srv_simulate_comp_failures, - PLUGIN_VAR_NOCMDARG, - "Simulate compression failures.", - NULL, NULL, 0, 0, 99, 0); #endif /* UNIV_DEBUG */ static MYSQL_SYSVAR_BOOL(force_primary_key, @@ -20134,7 +20345,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(compression_pad_pct_max), MYSQL_SYSVAR(default_row_format), #ifdef UNIV_DEBUG - MYSQL_SYSVAR(simulate_comp_failures), MYSQL_SYSVAR(trx_rseg_n_slots_debug), MYSQL_SYSVAR(limit_optimistic_insert_debug), MYSQL_SYSVAR(trx_purge_view_update_only_debug), @@ -20368,7 +20578,7 @@ static bool table_name_parse( memcpy(tbl_buf, tbl_name.m_name + dbnamelen + 1, tblnamelen); tbl_buf[tblnamelen] = 0; - filename_to_tablename(db_buf, dbname, MAX_DATABASE_NAME_LEN + 1, true); + dbnamelen = filename_to_tablename(db_buf, dbname, MAX_DATABASE_NAME_LEN + 1, true); if (tblnamelen > TEMP_FILE_PREFIX_LENGTH && !strncmp(tbl_buf, TEMP_FILE_PREFIX, TEMP_FILE_PREFIX_LENGTH)) { @@ -20380,7 +20590,7 @@ static bool table_name_parse( tblnamelen = is_part - tbl_buf; } - filename_to_tablename(tbl_buf, tblname, MAX_TABLE_NAME_LEN + 1, true); + tblnamelen = filename_to_tablename(tbl_buf, tblname, MAX_TABLE_NAME_LEN + 1, true); return true; } @@ -20812,11 +21022,11 @@ innobase_get_computed_value( field = dtuple_get_nth_v_field(row, col->v_pos); - my_bitmap_map* old_write_set = dbug_tmp_use_all_columns(mysql_table, mysql_table->write_set); - my_bitmap_map* old_read_set = dbug_tmp_use_all_columns(mysql_table, mysql_table->read_set); + MY_BITMAP *old_write_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->write_set); + MY_BITMAP *old_read_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->read_set); ret = mysql_table->update_virtual_field(mysql_table->field[col->m_col.ind]); - dbug_tmp_restore_column_map(mysql_table->read_set, old_read_set); - dbug_tmp_restore_column_map(mysql_table->write_set, old_write_set); + dbug_tmp_restore_column_map(&mysql_table->read_set, old_read_set); + dbug_tmp_restore_column_map(&mysql_table->write_set, old_write_set); if (ret != 0) { DBUG_RETURN(NULL); @@ -21533,11 +21743,12 @@ ib_push_warning( va_start(args, format); buf = (char *)my_malloc(MAX_BUF_SIZE, MYF(MY_WME)); - vsprintf(buf,format, args); - - push_warning_printf( - thd, Sql_condition::WARN_LEVEL_WARN, - uint(convert_error_code_to_mysql(error, 0, thd)), buf); + buf[MAX_BUF_SIZE - 1] = 0; + vsnprintf(buf, MAX_BUF_SIZE - 1, format, args); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + uint(convert_error_code_to_mysql(error, 0, + 
thd)), + buf); my_free(buf); va_end(args); } @@ -21565,7 +21776,8 @@ ib_push_warning( if (thd) { va_start(args, format); buf = (char *)my_malloc(MAX_BUF_SIZE, MYF(MY_WME)); - vsprintf(buf,format, args); + buf[MAX_BUF_SIZE - 1] = 0; + vsnprintf(buf, MAX_BUF_SIZE - 1, format, args); push_warning_printf( thd, Sql_condition::WARN_LEVEL_WARN, diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index eb2afb93595..619dfca34fb 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -45,7 +45,7 @@ struct ha_table_option_struct uint atomic_writes; /*!< Use atomic writes for this table if this options is ON or in DEFAULT if - srv_use_atomic_writes=1. + innodb_use_atomic_writes. Atomic writes are not used if value OFF.*/ uint encryption; /*!< DEFAULT, ON, OFF */ @@ -968,6 +968,3 @@ which is in the prepared state @return 0 or error number */ int innobase_rollback_by_xid(handlerton* hton, XID* xid); - -/** Free tablespace resources allocated. */ -void innobase_space_shutdown(); diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index d473f9dec8f..a330cbc5460 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2005, 2019, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -766,6 +766,13 @@ inline void dict_table_t::rollback_instant( const ulint* col_map) { ut_d(dict_sys.assert_locked()); + + if (cols == old_cols) { + /* Alter fails before instant operation happens. + So there is no need to do rollback instant operation */ + return; + } + dict_index_t* index = indexes.start; mtr_t mtr; mtr.start(); @@ -1060,13 +1067,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx @return whether the table will be rebuilt */ bool need_rebuild () const { return(old_table != new_table); } - /** Clear uncommmitted added indexes after a failed operation. */ - void clear_added_indexes() - { - for (ulint i= 0; i < num_to_add_index; i++) - add_index[i]->detach_columns(true); - } - /** Convert table-rebuilding ALTER to instant ALTER. */ void prepare_instant() { @@ -1164,6 +1164,42 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx } } + /** @return whether the given column is being added */ + bool is_new_vcol(const dict_v_col_t &v_col) const + { + for (ulint i= 0; i < num_to_add_vcol; i++) + if (&add_vcol[i] == &v_col) + return true; + return false; + } + + /** During rollback, make newly added indexes point to + newly added virtual columns. 
*/ + void clean_new_vcol_index() + { + ut_ad(old_table == new_table); + const dict_index_t *index= dict_table_get_first_index(old_table); + while ((index= dict_table_get_next_index(index)) != NULL) + { + if (!index->has_virtual() || index->is_committed()) + continue; + ulint n_drop_new_vcol= index->get_new_n_vcol(); + for (ulint i= 0; n_drop_new_vcol && i < index->n_fields; i++) + { + dict_col_t *col= index->fields[i].col; + /* Skip the non-virtual and old virtual columns */ + if (!col->is_virtual()) + continue; + dict_v_col_t *vcol= reinterpret_cast<dict_v_col_t*>(col); + if (!is_new_vcol(*vcol)) + continue; + + index->fields[i].col= &index->new_vcol_info-> + add_drop_v_col(index->heap, vcol, --n_drop_new_vcol)->m_col; + } + } + } + private: // Disable copying ha_innobase_inplace_ctx(const ha_innobase_inplace_ctx&); @@ -3433,9 +3469,9 @@ innobase_row_to_mysql( } } if (table->vfield) { - my_bitmap_map* old_read_set = tmp_use_all_columns(table, table->read_set); + MY_BITMAP* old_read_set = tmp_use_all_columns(table, &table->read_set); table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ); - tmp_restore_column_map(table->read_set, old_read_set); + tmp_restore_column_map(&table->read_set, old_read_set); } } @@ -3791,9 +3827,11 @@ innobase_fts_check_doc_id_index( for (index = dict_table_get_first_index(table); index; index = dict_table_get_next_index(index)) { + /* Check if there exists a unique index with the name of - FTS_DOC_ID_INDEX_NAME */ - if (innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME)) { + FTS_DOC_ID_INDEX_NAME and ignore the corrupted index */ + if (index->type & DICT_CORRUPT + || innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME)) { continue; } @@ -4101,7 +4139,7 @@ online_retry_drop_indexes_low( ut_ad(table->get_ref_count() >= 1); if (table->drop_aborted) { - row_merge_drop_indexes(trx, table, TRUE); + row_merge_drop_indexes(trx, table, true); } } @@ -5907,11 +5945,13 @@ add_all_virtual: const rec_t* rec = btr_pcur_get_rec(&pcur); que_thr_t* thr = pars_complete_graph_for_exec( NULL, trx, ctx->heap, NULL); + const bool is_root = block->page.id.page_no() == index->page; dberr_t err = DB_SUCCESS; if (rec_is_metadata(rec, *index)) { ut_ad(page_rec_is_user_rec(rec)); - if (!rec_is_alter_metadata(rec, *index) + if (is_root + && !rec_is_alter_metadata(rec, *index) && !index->table->instant && !page_has_next(block->frame) && page_rec_is_last(rec, block->frame)) { @@ -5978,7 +6018,7 @@ add_all_virtual: offsets = rec_get_offsets( btr_pcur_get_rec(&pcur), index, offsets, - true, ULINT_UNDEFINED, &offsets_heap); + index->n_core_fields, ULINT_UNDEFINED, &offsets_heap); if (big_rec) { if (err == DB_SUCCESS) { err = btr_store_big_rec_extern_fields( @@ -5993,7 +6033,8 @@ add_all_virtual: } btr_pcur_close(&pcur); goto func_exit; - } else if (page_rec_is_supremum(rec) && !index->table->instant) { + } else if (is_root && page_rec_is_supremum(rec) + && !index->table->instant) { empty_table: /* The table is empty. */ ut_ad(fil_page_index_page_check(block->frame)); @@ -6523,6 +6564,7 @@ new_clustered_failed: } if (dict_col_name_is_reserved(field->field_name.str)) { +wrong_column_name: dict_mem_table_free(ctx->new_table); ctx->new_table = ctx->old_table; my_error(ER_WRONG_COLUMN_NAME, MYF(0), @@ -6530,6 +6572,21 @@ new_clustered_failed: goto new_clustered_failed; } + /** Note the FTS_DOC_ID name is case sensitive due + to internal query parser. 
+ FTS_DOC_ID column must be of BIGINT NOT NULL type + and it should be in all capitalized characters */ + if (!innobase_strcasecmp(field->field_name.str, + FTS_DOC_ID_COL_NAME)) { + if (col_type != DATA_INT + || field->real_maybe_null() + || col_len != sizeof(doc_id_t) + || strcmp(field->field_name.str, + FTS_DOC_ID_COL_NAME)) { + goto wrong_column_name; + } + } + if (is_virtual) { dict_mem_table_add_v_col( ctx->new_table, ctx->heap, @@ -6878,7 +6935,7 @@ new_table_failed: for (ulint a = 0; a < ctx->num_to_add_index; a++) { dict_index_t* index = ctx->add_index[a]; - const bool has_new_v_col = index->has_new_v_col; + const ulint n_v_col = index->get_new_n_vcol(); index = create_index_dict(ctx->trx, index, add_v); error = ctx->trx->error_state; if (error != DB_SUCCESS) { @@ -6908,7 +6965,9 @@ error_handling_drop_uncached_1: goto error_handling_drop_uncached_1; } index->parser = index_defs[a].parser; - index->has_new_v_col = has_new_v_col; + if (n_v_col) { + index->assign_new_v_col(n_v_col); + } /* Note the id of the transaction that created this index, we use it to restrict readers from accessing this index, to ensure read consistency. */ @@ -6978,7 +7037,7 @@ error_handling_drop_uncached_1: for (ulint a = 0; a < ctx->num_to_add_index; a++) { dict_index_t* index = ctx->add_index[a]; - const bool has_new_v_col = index->has_new_v_col; + const ulint n_v_col = index->get_new_n_vcol(); DBUG_EXECUTE_IF( "create_index_metadata_fail", if (a + 1 == ctx->num_to_add_index) { @@ -7010,7 +7069,9 @@ error_handling_drop_uncached: } index->parser = index_defs[a].parser; - index->has_new_v_col = has_new_v_col; + if (n_v_col) { + index->assign_new_v_col(n_v_col); + } /* Note the id of the transaction that created this index, we use it to restrict readers from accessing this index, to ensure read consistency. */ @@ -7235,7 +7296,7 @@ error_handled: online_retry_drop_indexes_with_trx(user_table, ctx->trx); } else { ut_ad(!ctx->need_rebuild()); - row_merge_drop_indexes(ctx->trx, user_table, TRUE); + row_merge_drop_indexes(ctx->trx, user_table, true); trx_commit_for_mysql(ctx->trx); } @@ -8598,7 +8659,6 @@ oom: that we hold at most a shared lock on the table. 
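Backing up to the FTS_DOC_ID check added near the top of this hunk: the column is accepted only as an 8-byte integer, NOT NULL, and spelled in exactly the uppercase form. The same predicate as a standalone sketch (field_def is a stand-in, not the server's Field class):

#include <cstdio>
#include <cstring>

struct field_def { const char *name; bool is_bigint; bool nullable; };

// The strcmp() catches case-only mismatches that the preceding
// case-insensitive lookup would otherwise let through.
static bool valid_fts_doc_id(const field_def &f)
{
    return f.is_bigint && !f.nullable && !strcmp(f.name, "FTS_DOC_ID");
}

int main()
{
    field_def ok  = { "FTS_DOC_ID", true, false };
    field_def bad = { "fts_doc_id", true, false };
    printf("%d %d\n", valid_fts_doc_id(ok), valid_fts_doc_id(bad)); // 1 0
}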
*/ m_prebuilt->trx->error_info = NULL; ctx->trx->error_state = DB_SUCCESS; - ctx->clear_added_indexes(); DBUG_RETURN(true); } @@ -8690,17 +8750,18 @@ temparary index prefix @param table the TABLE @param locked TRUE=table locked, FALSE=may need to do a lazy drop @param trx the transaction -*/ -static MY_ATTRIBUTE((nonnull)) +@param alter_trx transaction which takes S-lock on the table + while creating the index */ +static void innobase_rollback_sec_index( -/*========================*/ - dict_table_t* user_table, - const TABLE* table, - ibool locked, - trx_t* trx) + dict_table_t* user_table, + const TABLE* table, + bool locked, + trx_t* trx, + const trx_t* alter_trx=NULL) { - row_merge_drop_indexes(trx, user_table, locked); + row_merge_drop_indexes(trx, user_table, locked, alter_trx); /* Free the table->fts only if there is no FTS_DOC_ID in the table */ @@ -8795,7 +8856,12 @@ rollback_inplace_alter_table( DBUG_ASSERT(ctx->new_table == prebuilt->table); innobase_rollback_sec_index( - prebuilt->table, table, FALSE, ctx->trx); + prebuilt->table, table, + (ha_alter_info->alter_info->requested_lock + == Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE), + ctx->trx, prebuilt->trx); + + ctx->clean_new_vcol_index(); } trx_commit_for_mysql(ctx->trx); @@ -8937,6 +9003,7 @@ innobase_rename_column_try( const char* to) { dberr_t error; + bool clust_has_prefixes = false; DBUG_ENTER("innobase_rename_column_try"); @@ -8996,6 +9063,39 @@ innobase_rename_column_try( if (error != DB_SUCCESS) { goto err_exit; } + + if (!has_prefixes || !clust_has_prefixes + || f.prefix_len) { + continue; + } + + /* For secondary indexes, the + has_prefixes check can be 'polluted' + by PRIMARY KEY column prefix. Try also + the simpler encoding of SYS_FIELDS.POS. */ + info = pars_info_create(); + + pars_info_add_ull_literal(info, "indexid", index->id); + pars_info_add_int4_literal(info, "nth", i); + pars_info_add_str_literal(info, "new", to); + + error = que_eval_sql( + info, + "PROCEDURE RENAME_SYS_FIELDS_PROC () IS\n" + "BEGIN\n" + "UPDATE SYS_FIELDS SET COL_NAME=:new\n" + "WHERE INDEX_ID=:indexid\n" + "AND POS=:nth;\n" + "END;\n", + FALSE, trx); + + if (error != DB_SUCCESS) { + goto err_exit; + } + } + + if (index == dict_table_get_first_index(ctx.old_table)) { + clust_has_prefixes = has_prefixes; } } @@ -10150,6 +10250,44 @@ innobase_page_compression_try( DBUG_RETURN(false); } +static +void +dict_stats_try_drop_table(THD *thd, const table_name_t &name, + const LEX_CSTRING &table_name) +{ + char errstr[1024]; + if (dict_stats_drop_table(name.m_name, errstr, sizeof(errstr)) != DB_SUCCESS) + { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_ALTER_INFO, + "Deleting persistent statistics" + " for table '%s' in InnoDB failed: %s", + table_name.str, + errstr); + } +} + +/** Evict the table from cache and reopen it. Drop outdated statistics. + @param thd mariadb THD entity + @param table innodb table + @param maria_table_name user-friendly table name for errors + @return newly opened table */ +static +dict_table_t* +innobase_reload_table(THD *thd, dict_table_t *table, + const LEX_CSTRING &table_name) +{ + char *tb_name= strdup(table->name.m_name); + dict_table_close(table, true, false); + dict_sys.remove(table); + table= dict_table_open_on_name(tb_name, TRUE, TRUE, + DICT_ERR_IGNORE_FK_NOKEY); + + /* Drop outdated table stats. 
*/ + dict_stats_try_drop_table(thd, table->name, table_name); + free(tb_name); + return table; +} + /** Commit the changes made during prepare_inplace_alter_table() and inplace_alter_table() inside the data dictionary tables, when not rebuilding the table. @@ -11358,44 +11496,25 @@ foreign_fail: Currently dict_load_column_low() is the only place where num_base for virtual columns is assigned to nonzero. */ if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol + || (ctx0->new_table->n_v_cols && !new_clustered + && (ha_alter_info->alter_info->drop_list.elements + || ha_alter_info->alter_info->create_list.elements)) || (ctx0->is_instant() && m_prebuilt->table->n_v_cols && ha_alter_info->handler_flags & ALTER_STORED_COLUMN_ORDER)) { - /* FIXME: this workaround does not seem to work with - partitioned tables */ DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1); - trx_commit_for_mysql(m_prebuilt->trx); - char tb_name[NAME_LEN * 2 + 1 + 1]; - strcpy(tb_name, m_prebuilt->table->name.m_name); - dict_table_close(m_prebuilt->table, true, false); if (ctx0->is_instant()) { for (unsigned i = ctx0->old_n_v_cols; i--; ) { ctx0->old_v_cols[i].~dict_v_col_t(); } const_cast<unsigned&>(ctx0->old_n_v_cols) = 0; } - dict_sys.remove(m_prebuilt->table); - m_prebuilt->table = dict_table_open_on_name( - tb_name, TRUE, TRUE, DICT_ERR_IGNORE_FK_NOKEY); - /* Drop outdated table stats. */ - char errstr[1024]; - if (dict_stats_drop_table( - m_prebuilt->table->name.m_name, - errstr, sizeof(errstr)) - != DB_SUCCESS) { - push_warning_printf( - m_user_thd, - Sql_condition::WARN_LEVEL_WARN, - ER_ALTER_INFO, - "Deleting persistent statistics" - " for table '%s' in" - " InnoDB failed: %s", - table->s->table_name.str, - errstr); - } + m_prebuilt->table = innobase_reload_table(m_user_thd, + m_prebuilt->table, + table->s->table_name); row_mysql_unlock_data_dictionary(trx); trx->free(); @@ -11455,25 +11574,12 @@ foreign_fail: old copy of the table (which was renamed to ctx->tmp_name). */ - char errstr[1024]; - DBUG_ASSERT(0 == strcmp(ctx->old_table->name.m_name, ctx->tmp_name)); - if (dict_stats_drop_table( - ctx->new_table->name.m_name, - errstr, sizeof(errstr)) - != DB_SUCCESS) { - push_warning_printf( - m_user_thd, - Sql_condition::WARN_LEVEL_WARN, - ER_ALTER_INFO, - "Deleting persistent statistics" - " for rebuilt table '%s' in" - " InnoDB failed: %s", - table->s->table_name.str, - errstr); - } + dict_stats_try_drop_table(m_user_thd, + ctx->new_table->name, + table->s->table_name); DBUG_EXECUTE_IF("ib_ddl_crash_before_commit", DBUG_SUICIDE();); diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index 469836f0955..fccd87ab416 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2016, 2020, MariaDB Corporation. +Copyright (c) 2016, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -3870,7 +3870,7 @@ dump: row_ins_sec_index_entry_by_modify(BTR_MODIFY_LEAF). 
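A pattern repeated throughout this merge, in the R-tree search hunks earlier, the instant-ALTER hunks, and the change-buffer hunk just below: the boolean leaf flag of rec_get_offsets() becomes a count of core fields, 0 for node-pointer records and the field count for leaf records (index->n_fields where instant ADD COLUMN cannot apply, as in R-tree and change-buffer indexes, n_core_fields otherwise). A stand-in illustration of the convention:

#include <cstdio>

// Stand-in for dict_index_t: after instant ADD COLUMN, n_core_fields can
// be smaller than n_fields.
struct dict_index { unsigned n_fields; unsigned n_core_fields; };

// What the updated callers pass as the former 'leaf' argument.
static unsigned n_core_arg(const dict_index &index, unsigned level)
{
    return level ? 0 : index.n_core_fields; // 0 selects node-pointer format
}

int main()
{
    dict_index idx = { 6, 4 };
    printf("%u %u\n", n_core_arg(idx, 2), n_core_arg(idx, 0)); // 0 4
}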
*/ ut_ad(rec_get_deleted_flag(rec, page_is_comp(page))); - offsets = rec_get_offsets(rec, index, NULL, true, + offsets = rec_get_offsets(rec, index, NULL, index->n_fields, ULINT_UNDEFINED, &heap); update = row_upd_build_sec_rec_difference_binary( rec, index, offsets, entry, heap); @@ -4043,7 +4043,8 @@ ibuf_delete( ut_ad(ibuf_inside(mtr)); ut_ad(dtuple_check_typed(entry)); - ut_ad(!dict_index_is_spatial(index)); + ut_ad(!index->is_spatial()); + ut_ad(!index->is_clust()); low_match = page_cur_search(block, index, entry, &page_cur); @@ -4062,8 +4063,8 @@ ibuf_delete( rec_offs_init(offsets_); - offsets = rec_get_offsets( - rec, index, offsets, true, ULINT_UNDEFINED, &heap); + offsets = rec_get_offsets(rec, index, offsets, index->n_fields, + ULINT_UNDEFINED, &heap); if (page_get_n_recs(page) <= 1 || !(REC_INFO_DELETED_FLAG @@ -4858,6 +4859,13 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space) bitmap_page = ibuf_bitmap_get_map_page( page_id_t(space->id, page_no), zip_size, &mtr); + if (!bitmap_page) { + mutex_exit(&ibuf_mutex); + ibuf_exit(&mtr); + mtr_commit(&mtr); + return DB_CORRUPTION; + } + if (buf_is_zeroes(span<const byte>(bitmap_page, physical_size))) { /* This means we got all-zero page instead of @@ -4881,11 +4889,6 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space) continue; } - if (!bitmap_page) { - mutex_exit(&ibuf_mutex); - return DB_CORRUPTION; - } - for (i = FSP_IBUF_BITMAP_OFFSET + 1; i < physical_size; i++) { const ulint offset = page_no + i; const page_id_t cur_page_id(space->id, offset); diff --git a/storage/innobase/include/btr0bulk.h b/storage/innobase/include/btr0bulk.h index 4c5294f9b5f..b8428186383 100644 --- a/storage/innobase/include/btr0bulk.h +++ b/storage/innobase/include/btr0bulk.h @@ -326,6 +326,8 @@ public: /** Re-latch all latches */ void latch(); + table_name_t table_name() { return m_index->table->name; } + private: /** Insert a tuple to a page in a level @param[in] tuple tuple to insert diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h index 38960b1d15c..b0b61a4d1ff 100644 --- a/storage/innobase/include/btr0pcur.h +++ b/storage/innobase/include/btr0pcur.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -497,8 +497,10 @@ struct btr_pcur_t{ /** if cursor position is stored, contains an initial segment of the latest record cursor was positioned either on, before or after */ rec_t* old_rec; + /** btr_cur.index->n_core_fields when old_rec was copied */ + uint16 old_n_core_fields; /** number of fields in old_rec */ - ulint old_n_fields; + uint16 old_n_fields; /** BTR_PCUR_ON, BTR_PCUR_BEFORE, or BTR_PCUR_AFTER, depending on whether cursor was on, before, or after the old_rec record */ enum btr_pcur_pos_t rel_pos; diff --git a/storage/innobase/include/data0data.h b/storage/innobase/include/data0data.h index 04ddf5b0a42..fc774b6ee60 100644 --- a/storage/innobase/include/data0data.h +++ b/storage/innobase/include/data0data.h @@ -544,6 +544,17 @@ struct dtuple_t { @param[in] index index possibly with instantly added columns */ void trim(const dict_index_t& index); + bool vers_history_row() const + { + for (ulint i = 0; i < n_fields; i++) { + const dfield_t* field = &fields[i]; + if (field->type.vers_sys_end()) { + return field->vers_history_row(); + } + } + return false; + } + /** @param info_bits the info_bits of a data tuple @return whether this is a hidden metadata record diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index a3f4baa4c31..0f730ffbcb7 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -2,7 +2,7 @@ Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index c6a506472df..dc85c85474c 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -2,7 +2,7 @@ Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -795,6 +795,35 @@ struct dict_v_col_t{ } }; +/** Data structure for a newly added virtual column in an index. +It is used only during rollback_inplace_alter_table(), when rolling +back the addition of an index that depends on newly added virtual +columns. It is allocated from the index heap and should be freed +when the index is removed from the cache. 
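+A minimal usage sketch (an illustration inferred from this patch,
+not code quoted from it): during DDL rollback, each index field
+that refers to such a virtual column would be repointed at a
+duplicate kept on the index heap, roughly as
+  index->assign_new_v_col(n_v_col);
+  field->col = &index->new_vcol_info->add_drop_v_col(
+          index->heap, v_col, offset)->m_col;
+so that the duplicate stays valid until the index is evicted.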
*/ +struct dict_add_v_col_info +{ + ulint n_v_col; + dict_v_col_t *v_col; + + /** Duplicate a newly added virtual column while rolling back + an index that contains new virtual columns + @param heap index heap from which to allocate + @param col virtual column to be duplicated + @param offset position at which to duplicate the virtual column */ + dict_v_col_t* add_drop_v_col(mem_heap_t *heap, dict_v_col_t *col, + ulint offset) + { + ut_ad(n_v_col); + ut_ad(offset < n_v_col); + if (!v_col) + v_col= static_cast<dict_v_col_t*> + (mem_heap_alloc(heap, n_v_col * sizeof *v_col)); + new (&v_col[offset]) dict_v_col_t(); + v_col[offset].m_col= col->m_col; + v_col[offset].v_pos= col->v_pos; + return &v_col[offset]; + } +}; + /** Data structure for newly added virtual column in a table */ struct dict_add_v_col_t{ /** number of new virtual column */ @@ -1039,9 +1068,13 @@ struct dict_index_t { dict_field_t* fields; /*!< array of field descriptions */ st_mysql_ftparser* parser; /*!< fulltext parser plugin */ - bool has_new_v_col; - /*!< whether it has a newly added virtual - column in ALTER */ + + /** Virtual columns that were newly added during ALTER; the + columns are retained here in case the ALTER fails. Allocated + from dict_index_t::heap; should be freed when the index is + removed from the table. */ + dict_add_v_col_info* new_vcol_info; + bool index_fts_syncing;/*!< Whether the fts index is still syncing in the background; FIXME: remove this and use MDL */ @@ -1198,9 +1231,8 @@ public: /** @return whether the index is corrupted */ inline bool is_corrupted() const; - /** Detach the virtual columns from the index that is to be removed. - @param whether to reset fields[].col */ - void detach_columns(bool clear= false) + /** Detach the virtual columns from the index that is to be removed. */ + void detach_columns() { if (!has_virtual() || !cached) return; @@ -1210,8 +1242,6 @@ public: if (!col || !col->is_virtual()) continue; col->detach(*this); - if (clear) - fields[i].col= nullptr; } } @@ -1274,6 +1304,23 @@ public: bool vers_history_row(const rec_t* rec, bool &history_row); + /** Assign the number of new virtual columns that are added + as part of the index + @param n_vcol number of virtual columns to be added */ + void assign_new_v_col(ulint n_vcol) + { + new_vcol_info= static_cast<dict_add_v_col_info*> + (mem_heap_zalloc(heap, sizeof *new_vcol_info)); + new_vcol_info->n_v_col= n_vcol; + } + + /* @return whether the index has newly added virtual columns */ + bool has_new_v_col() const { return new_vcol_info; } + + /* @return number of newly added virtual columns */ + ulint get_new_n_vcol() const + { return new_vcol_info ? new_vcol_info->n_v_col : 0; } + /** Reconstruct the clustered index fields. */ inline void reconstruct_fields(); @@ -2286,6 +2333,17 @@ public: /** mysql_row_templ_t for base columns used to compute the virtual columns */ dict_vcol_templ_t* vc_templ; + + /* @return whether the table is locked by any transaction + other than the given one */ + bool has_lock_other_than(const trx_t *trx) const + { + for (lock_t *lock= UT_LIST_GET_FIRST(locks); lock; + lock= UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock)) + if (lock->trx != trx) + return true; + return false; + } }; inline void dict_index_t::set_modified(mtr_t& mtr) const diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 3001817a78c..873fcd67a3a 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -637,7 +637,7 @@ struct fil_node_t { /** Determine some file metadata when creating or reading the file. 
@param file the file that is being created, or OS_FILE_CLOSED */ void find_metadata(os_file_t file = OS_FILE_CLOSED -#ifdef UNIV_LINUX +#ifndef _WIN32 , struct stat* statbuf = NULL #endif ); diff --git a/storage/innobase/include/gis0rtree.ic b/storage/innobase/include/gis0rtree.ic index 2076b24b9b1..c829f0de255 100644 --- a/storage/innobase/include/gis0rtree.ic +++ b/storage/innobase/include/gis0rtree.ic @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2014, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -57,7 +57,8 @@ rtr_page_cal_mbr( page = buf_block_get_frame(block); rec = page_rec_get_next(page_get_infimum_rec(page)); - offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page), + offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page) + ? index->n_fields : 0, ULINT_UNDEFINED, &heap); do { diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h index 28e5d1d4f56..108f6925ef7 100644 --- a/storage/innobase/include/ha_prototypes.h +++ b/storage/innobase/include/ha_prototypes.h @@ -231,7 +231,7 @@ innobase_casedn_str( #ifdef WITH_WSREP UNIV_INTERN -int +void wsrep_innobase_kill_one_trx( THD* bf_thd, trx_t *victim_trx, diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index dbc53d0b786..fd12b0e3c9e 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -2,7 +2,7 @@ Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Percona Inc.. Those modifications are @@ -152,7 +152,6 @@ static const ulint OS_FILE_NORMAL = 62; /** Types for file create @{ */ static const ulint OS_DATA_FILE = 100; static const ulint OS_LOG_FILE = 101; -static const ulint OS_DATA_TEMP_FILE = 102; static const ulint OS_DATA_FILE_NO_O_DIRECT = 103; /* @} */ diff --git a/storage/innobase/include/page0cur.ic b/storage/innobase/include/page0cur.ic index f0844ee1f73..e53f6d8f463 100644 --- a/storage/innobase/include/page0cur.ic +++ b/storage/innobase/include/page0cur.ic @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2018, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -273,7 +273,8 @@ page_cur_tuple_insert( index, tuple, n_ext); *offsets = rec_get_offsets(rec, index, *offsets, - page_is_leaf(cursor->block->frame), + page_is_leaf(cursor->block->frame) + ? 
index->n_core_fields : 0, ULINT_UNDEFINED, heap); ut_ad(size == rec_offs_size(*offsets)); diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic index c0a3c86c737..b6584177fe4 100644 --- a/storage/innobase/include/page0page.ic +++ b/storage/innobase/include/page0page.ic @@ -1093,7 +1093,7 @@ page_get_instant(const page_t* page) break; } #endif /* UNIV_DEBUG */ - return(i >> 3); + return static_cast<uint16_t>(i >> 3); /* i / 8 */ } #endif /* !UNIV_INNOCHECKSUM */ diff --git a/storage/innobase/include/que0que.h b/storage/innobase/include/que0que.h index c8e1f92e670..f018f73527d 100644 --- a/storage/innobase/include/que0que.h +++ b/storage/innobase/include/que0que.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -303,7 +303,6 @@ que_fork_scheduler_round_robin( /** Query thread states */ enum que_thr_state_t { QUE_THR_RUNNING, - QUE_THR_PROCEDURE_WAIT, /** in selects this means that the thread is at the end of its result set (or start, in case of a scroll cursor); in other statements, this means the thread has done its task */ diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h index 6f6535c529f..34e7c5f1b0f 100644 --- a/storage/innobase/include/rem0rec.h +++ b/storage/innobase/include/rem0rec.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -543,7 +543,7 @@ rec_get_n_extern_new( @param[in] index the index that the record belongs to @param[in,out] offsets array comprising offsets[0] allocated elements, or an array from rec_get_offsets(), or NULL -@param[in] leaf whether this is a leaf-page record +@param[in] n_core 0, or index->n_core_fields for leaf page @param[in] n_fields maximum number of offsets to compute (ULINT_UNDEFINED to compute all offsets) @param[in,out] heap memory heap @@ -553,7 +553,7 @@ rec_get_offsets_func( const rec_t* rec, const dict_index_t* index, rec_offs* offsets, - bool leaf, + ulint n_core, ulint n_fields, #ifdef UNIV_DEBUG const char* file, /*!< in: file name where called */ @@ -1034,12 +1034,14 @@ rec_copy( const rec_offs* offsets); /** Determine the size of a data tuple prefix in a temporary file. +@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format @param[in] index clustered or secondary index @param[in] fields data fields @param[in] n_fields number of data fields @param[out] extra record header size @param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT @return total size, in bytes */ +template<bool redundant_temp> ulint rec_get_converted_size_temp( const dict_index_t* index, @@ -1078,11 +1080,13 @@ rec_init_offsets_temp( MY_ATTRIBUTE((nonnull)); /** Convert a data tuple prefix to the temporary file format. 
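+(A usage note, inferred rather than quoted from this change: the
+template parameter would be selected explicitly at the call site,
+e.g. rec_convert_dtuple_to_temp<false>(rec, index, fields,
+n_fields, status) for the non-REDUNDANT temporary encoding.)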
+@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format @param[out] rec record in temporary file format @param[in] index clustered or secondary index @param[in] fields data fields @param[in] n_fields number of data fields @param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT */ +template<bool redundant_temp> void rec_convert_dtuple_to_temp( rec_t* rec, @@ -1175,7 +1179,9 @@ rec_get_converted_size( The fields are copied into the memory heap. @param[out] tuple data tuple @param[in] rec index record, or a copy thereof -@param[in] is_leaf whether rec is a leaf page record +@param[in] index index of rec +@param[in] n_core index->n_core_fields at the time rec was + copied, or 0 if non-leaf page record @param[in] n_fields number of fields to copy @param[in,out] heap memory heap */ void @@ -1183,7 +1189,7 @@ rec_copy_prefix_to_dtuple( dtuple_t* tuple, const rec_t* rec, const dict_index_t* index, - bool is_leaf, + ulint n_core, ulint n_fields, mem_heap_t* heap) MY_ATTRIBUTE((nonnull)); diff --git a/storage/innobase/include/row0ins.h b/storage/innobase/include/row0ins.h index 34427dc6dc7..9a16394a052 100644 --- a/storage/innobase/include/row0ins.h +++ b/storage/innobase/include/row0ins.h @@ -206,6 +206,7 @@ struct ins_node_t if this is NULL, entry list should be created and buffers for sys fields in row allocated */ void vers_update_end(row_prebuilt_t *prebuilt, bool history_row); + bool vers_history_row() const; /* true if 'row' is historical */ }; /** Create an insert object. diff --git a/storage/innobase/include/row0log.h b/storage/innobase/include/row0log.h index 5ec4b9c1103..93aa5c24230 100644 --- a/storage/innobase/include/row0log.h +++ b/storage/innobase/include/row0log.h @@ -247,6 +247,11 @@ row_log_apply( ut_stage_alter_t* stage) MY_ATTRIBUTE((warn_unused_result)); +/** Get the n_core_fields of the online log for an index +@param index index whose online log n_core_fields is accessed +@return the n_core_fields value of the online log */ +unsigned row_log_get_n_core_fields(const dict_index_t *index); + #ifdef HAVE_PSI_STAGE_INTERFACE /** Estimate how much work is to be done by the log apply phase of an ALTER TABLE for this index. diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h index e88380b94e3..3252af0062b 100644 --- a/storage/innobase/include/row0merge.h +++ b/storage/innobase/include/row0merge.h @@ -167,18 +167,20 @@ row_merge_drop_indexes_dict( table_id_t table_id)/*!< in: table identifier */ MY_ATTRIBUTE((nonnull)); -/*********************************************************************//** -Drop those indexes which were created before an error occurred. +/** Drop indexes that were created before an error occurred. The data dictionary must have been locked exclusively by the caller, -because the transaction will not be committed. */ +because the transaction will not be committed. 
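+(The call site added earlier in this patch, in the InnoDB handler
+rollback path, passes the ALTER transaction here, presumably so
+that the S-lock which that transaction holds on the table is not
+mistaken for activity by other transactions; compare
+dict_table_t::has_lock_other_than(). Sketch of the call, as in
+innobase_rollback_sec_index():
+  row_merge_drop_indexes(trx, user_table, locked, alter_trx);)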
+@param trx dictionary transaction +@param table table containing the indexes +@param locked true if the table is locked; + false if a lazy drop may be needed +@param alter_trx ALTER TABLE transaction */ void row_merge_drop_indexes( -/*===================*/ - trx_t* trx, /*!< in/out: transaction */ - dict_table_t* table, /*!< in/out: table containing the indexes */ - ibool locked) /*!< in: TRUE=table locked, - FALSE=may need to do a lazy drop */ - MY_ATTRIBUTE((nonnull)); + trx_t* trx, + dict_table_t* table, + bool locked, + const trx_t* alter_trx=NULL); /*********************************************************************//** Drop all partially created indexes during crash recovery. */ diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h index f018f4eed73..cbb544f60c1 100644 --- a/storage/innobase/include/row0mysql.h +++ b/storage/innobase/include/row0mysql.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2019, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -804,12 +804,6 @@ struct row_prebuilt_t { search key values from MySQL format to InnoDB format.*/ uint srch_key_val_len; /*!< Size of search key */ - /** Disable prefetch. */ - bool m_no_prefetch; - - /** Return materialized key for secondary index scan */ - bool m_read_virtual_key; - /** The MySQL table object */ TABLE* m_mysql_table; diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index e1d37613dc9..8dcba6e6bc5 100644 @@ -3,7 +3,7 @@ Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2019, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -410,8 +410,6 @@ extern double srv_defragment_fill_factor; extern uint srv_defragment_frequency; extern ulonglong srv_defragment_interval; -extern ulong srv_idle_flush_pct; - extern uint srv_change_buffer_max_size; /* Number of IO operations per second the server can do */ @@ -594,9 +592,6 @@ extern struct export_var_t export_vars; /** Global counters */ extern srv_stats_t srv_stats; -/** Simulate compression failures. */ -extern uint srv_simulate_comp_failures; - /** Fatal semaphore wait threshold = maximum number of seconds that semaphore times out in InnoDB */ #define DEFAULT_SRV_FATAL_SEMAPHORE_TIMEOUT 600 diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h index 6ba457cdc40..5812c87feeb 100644 --- a/storage/innobase/include/trx0sys.h +++ b/storage/innobase/include/trx0sys.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -89,7 +89,6 @@ void trx_write_trx_id(byte* db_trx_id, trx_id_t id) { compile_time_assert(DATA_TRX_ID_LEN == 6); - ut_ad(id); mach_write_to_6(db_trx_id, id); } @@ -847,8 +846,10 @@ public: #endif /** Latest recovered binlog offset */ uint64_t recovered_binlog_offset; - /** Latest recovred binlog file name */ + /** Latest recovered binlog file name */ char recovered_binlog_filename[TRX_SYS_MYSQL_LOG_NAME_LEN]; + /** FIL_PAGE_LSN of the page with the latest recovered binlog metadata */ + lsn_t recovered_binlog_lsn; /** diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index c32234d923d..daffbacdfe6 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -799,6 +799,9 @@ public: /** whether wsrep_on(mysql_thd) held at the start of transaction */ bool wsrep; bool is_wsrep() const { return UNIV_UNLIKELY(wsrep); } + /** true, if BF thread is performing unique secondary index scanning */ + bool wsrep_UK_scan; + bool is_wsrep_UK_scan() const { return UNIV_UNLIKELY(wsrep_UK_scan); } #else /* WITH_WSREP */ bool is_wsrep() const { return false; } #endif /* WITH_WSREP */ diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index 99e493acfb4..b66ea937ec2 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Copyright (c) 2008, Google Inc. Portions of this file contain modifications contributed and copyrighted by diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h index 430b99d7667..807d99fb872 100644 --- a/storage/innobase/include/ut0ut.h +++ b/storage/innobase/include/ut0ut.h @@ -159,7 +159,7 @@ ut_time_ms(void); store the given number of bits. @param b in: bits @return number of bytes (octets) needed to represent b */ -#define UT_BITS_IN_BYTES(b) (((b) + 7) / 8) +#define UT_BITS_IN_BYTES(b) (((b) + 7) >> 3) /** Determines if a number is zero or a power of two. @param[in] n number diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 81525680a33..e733a6a1d03 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -637,56 +637,82 @@ lock_rec_get_insert_intention( return(lock->type_mode & LOCK_INSERT_INTENTION); } +#ifdef UNIV_DEBUG #ifdef WITH_WSREP -/** Check if both conflicting lock and other record lock are brute force -(BF). This case is a bug so report lock information and wsrep state. -@param[in] lock_rec1 conflicting waiting record lock or NULL -@param[in] lock_rec2 other waiting record lock -@param[in] trx1 lock_rec1 can be NULL, trx +/** Check whether both the transaction holding the conflicting lock +and the transaction requesting the record lock are brute force +(BF). If they are, check whether this BF-BF wait is valid; if it is +not, report the BF wait and assert. + +@param[in] lock other waiting record lock +@param[in] trx transaction requesting the conflicting record lock */ -static void wsrep_assert_no_bf_bf_wait( - const lock_t* lock_rec1, - const lock_t* lock_rec2, - const trx_t* trx1) +static void wsrep_assert_no_bf_bf_wait(const lock_t *lock, const trx_t *trx) { - ut_ad(!lock_rec1 || lock_get_type_low(lock_rec1) == LOCK_REC); - ut_ad(lock_get_type_low(lock_rec2) == LOCK_REC); + ut_ad(lock_get_type_low(lock) == LOCK_REC); + ut_ad(lock_mutex_own()); + trx_t* lock_trx= lock->trx; - if (!trx1->is_wsrep() || !lock_rec2->trx->is_wsrep()) - return; - if (UNIV_LIKELY(!wsrep_thd_is_BF(trx1->mysql_thd, FALSE))) + /* Note that we are holding lock_sys->mutex, thus we should + not acquire the THD::LOCK_thd_data mutex below, to avoid a + mutexing order violation. */ + + if (!trx->is_wsrep() || !lock_trx->is_wsrep()) return; - if (UNIV_LIKELY(!wsrep_thd_is_BF(lock_rec2->trx->mysql_thd, FALSE))) + if (UNIV_LIKELY(!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) + || UNIV_LIKELY(!wsrep_thd_is_BF(lock_trx->mysql_thd, FALSE))) return; - mtr_t mtr; + ut_ad(trx->state == TRX_STATE_ACTIVE); + + trx_mutex_enter(lock_trx); + const trx_state_t trx2_state= lock_trx->state; + trx_mutex_exit(lock_trx); + + /* If the conflicting transaction is already committed in + memory or prepared, we should wait. When a transaction is + committed in memory, its trx mutex is held, but not + lock_sys->mutex. Therefore, we can end up here before that + transaction has had time to do lock_release(), which is + protected by lock_sys->mutex. */ + switch (trx2_state) { + case TRX_STATE_COMMITTED_IN_MEMORY: + case TRX_STATE_PREPARED: + return; + case TRX_STATE_ACTIVE: + break; + default: + ut_ad("invalid state" == 0); + } - if (lock_rec1) { - ib::error() << "Waiting lock on table: " - << lock_rec1->index->table->name - << " index: " - << lock_rec1->index->name() - << " that has conflicting lock "; - lock_rec_print(stderr, lock_rec1, mtr); + /* If the BF-BF order is honored, i.e. the trx already holding + the record lock is ordered before this new lock request, we + can keep the requesting trx waiting for the lock. If the + conflicting transaction is already aborting or rolling back + for replay, we can also let the new transaction wait. 
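+To summarize the conditions checked here and above: the wait is
+tolerated when the lock holder is already committed in memory or
+prepared, when wsrep_thd_order_before() orders the holder before
+the requester, or when the holder is aborting; any remaining BF-BF
+wait is reported below and the debug build asserts.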
*/ + if (wsrep_thd_order_before(lock_trx->mysql_thd, trx->mysql_thd) + || wsrep_thd_is_aborting(lock_trx->mysql_thd)) { + return; } + mtr_t mtr; + ib::error() << "Conflicting lock on table: " - << lock_rec2->index->table->name + << lock->index->table->name << " index: " - << lock_rec2->index->name() + << lock->index->name() << " that has lock "; - lock_rec_print(stderr, lock_rec2, mtr); + lock_rec_print(stderr, lock, mtr); ib::error() << "WSREP state: "; - wsrep_report_bf_lock_wait(trx1->mysql_thd, - trx1->id); - wsrep_report_bf_lock_wait(lock_rec2->trx->mysql_thd, - lock_rec2->trx->id); + wsrep_report_bf_lock_wait(trx->mysql_thd, + trx->id); + wsrep_report_bf_lock_wait(lock_trx->mysql_thd, + lock_trx->id); /* BF-BF wait is a bug */ ut_error; } #endif /* WITH_WSREP */ +#endif /* UNIV_DEBUG */ /*********************************************************************//** Checks if a lock request for a new lock has to wait for request lock2. @@ -714,6 +740,7 @@ lock_rec_has_to_wait( { ut_ad(trx && lock2); ut_ad(lock_get_type_low(lock2) == LOCK_REC); + ut_ad(lock_mutex_own()); if (trx == lock2->trx || lock_mode_compatible( @@ -794,9 +821,25 @@ lock_rec_has_to_wait( } #ifdef WITH_WSREP - /* There should not be two conflicting locks that are - brute force. If there is it is a bug. */ - wsrep_assert_no_bf_bf_wait(NULL, lock2, trx); + /* If the new lock request comes from a transaction that is + doing a unique key scan as a wsrep high-priority (brute force) + transaction, and the conflicting transaction is also a wsrep + high-priority transaction, we avoid the lock conflict: the + ordering of these transactions has already been decided, and + the conflicting transaction will later be replayed. Note + that the transaction holding the conflicting lock cannot be + committed or rolled back while we hold + lock_sys->mutex. */ + if (trx->is_wsrep_UK_scan() + && wsrep_thd_is_BF(lock2->trx->mysql_thd, false)) { + return false; + } + + /* We can well let a BF transaction wait normally here, as the + other BF transaction will be replayed in case of conflict. In + debug builds we do additional sanity checks to catch any + unsupported BF wait. 
*/ + ut_d(wsrep_assert_no_bf_bf_wait(lock2, trx)); #endif /* WITH_WSREP */ return true; @@ -1065,65 +1108,31 @@ lock_rec_other_has_expl_req( #endif /* UNIV_DEBUG */ #ifdef WITH_WSREP -static -void -wsrep_kill_victim( -/*==============*/ - const trx_t * const trx, - const lock_t *lock) +static void wsrep_kill_victim(const trx_t * const trx, const lock_t *lock) { ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(lock->trx)); + ut_ad(trx->is_wsrep()); + trx_t* lock_trx = lock->trx; + ut_ad(trx_mutex_own(lock_trx)); + ut_ad(lock_trx != trx); - /* quit for native mysql */ - if (!trx->is_wsrep()) return; - - if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) return; - } - my_bool bf_other = wsrep_thd_is_BF(lock->trx->mysql_thd, FALSE); - mtr_t mtr; + if (lock_trx->state == TRX_STATE_COMMITTED_IN_MEMORY + || lock_trx->lock.was_chosen_as_deadlock_victim) + return; - if ((!bf_other) || - (wsrep_thd_order_before( - trx->mysql_thd, lock->trx->mysql_thd))) { - - if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { - if (UNIV_UNLIKELY(wsrep_debug)) { - ib::info() << "WSREP: BF victim waiting\n"; - } + if (!wsrep_thd_is_BF(lock_trx->mysql_thd, FALSE) + || wsrep_thd_order_before(trx->mysql_thd, lock_trx->mysql_thd)) { + if (lock_trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + if (UNIV_UNLIKELY(wsrep_debug)) + WSREP_INFO("BF victim waiting"); /* cannot release lock, until our lock is in the queue*/ - } else if (lock->trx != trx) { - if (wsrep_log_conflicts) { - ib::info() << "*** Priority TRANSACTION:"; - - trx_print_latched(stderr, trx, 3000); - - if (bf_other) { - ib::info() << "*** Priority TRANSACTION:"; - } else { - ib::info() << "*** Victim TRANSACTION:"; - } - trx_print_latched(stderr, lock->trx, 3000); - - ib::info() << "*** WAITING FOR THIS LOCK TO BE GRANTED:"; - - if (lock_get_type(lock) == LOCK_REC) { - lock_rec_print(stderr, lock, mtr); - } else { - lock_table_print(stderr, lock); - } - - ib::info() << " SQL1: " - << wsrep_thd_query(trx->mysql_thd); - ib::info() << " SQL2: " - << wsrep_thd_query(lock->trx->mysql_thd); - } - + } else { wsrep_innobase_kill_one_trx(trx->mysql_thd, - lock->trx, true); + lock_trx, true); } } } @@ -1454,11 +1463,6 @@ lock_rec_create_low( trx_mutex_exit(c_lock->trx); - if (UNIV_UNLIKELY(wsrep_debug)) { - wsrep_report_bf_lock_wait(trx->mysql_thd, trx->id); - wsrep_report_bf_lock_wait(c_lock->trx->mysql_thd, c_lock->trx->id); - } - /* have to bail out here to avoid lock_set_lock... */ return(lock); } @@ -2222,10 +2226,6 @@ static void lock_rec_dequeue_from_page(lock_t* in_lock) /* Grant the lock */ ut_ad(lock->trx != in_lock->trx); lock_grant(lock); -#ifdef WITH_WSREP - } else { - wsrep_assert_no_bf_bf_wait(c, lock, c->trx); -#endif /* WITH_WSREP */ } } } else { @@ -4178,10 +4178,6 @@ released: /* Grant the lock */ ut_ad(trx != lock->trx); lock_grant(lock); -#ifdef WITH_WSREP - } else { - wsrep_assert_no_bf_bf_wait(c, lock, c->trx); -#endif /* WITH_WSREP */ } } } else { @@ -4237,6 +4233,18 @@ lock_check_dict_lock( and release possible other transactions waiting because of these locks. 
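+In this patch, debug builds may additionally evict the tables
+modified by the transaction from the dictionary cache once the
+locks have been released, when the debug variable
+innodb_evict_tables_on_commit_debug is set; see the UNIV_DEBUG
+blocks below.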
*/ void lock_release(trx_t* trx) { +#ifdef UNIV_DEBUG + std::set<table_id_t> to_evict; + if (innodb_evict_tables_on_commit_debug && !trx->is_recovered) +# if 1 /* if dict_stats_exec_sql() were not playing dirty tricks */ + if (!mutex_own(&dict_sys.mutex)) +# else /* this would be the more proper way to do it */ + if (!trx->dict_operation_lock_mode && !trx->dict_operation) +# endif + for (const auto& p : trx->mod_tables) + if (!p.first->is_temporary()) + to_evict.emplace(p.first->id); +#endif ulint count = 0; trx_id_t max_trx_id = trx_sys.get_max_trx_id(); @@ -4285,6 +4293,25 @@ void lock_release(trx_t* trx) } lock_mutex_exit(); + +#ifdef UNIV_DEBUG + if (to_evict.empty()) { + return; + } + mutex_enter(&dict_sys.mutex); + lock_mutex_enter(); + for (table_id_t id : to_evict) { + if (dict_table_t *table = dict_table_open_on_id( + id, TRUE, DICT_TABLE_OP_OPEN_ONLY_IF_CACHED)) { + if (!table->get_ref_count() + && !UT_LIST_GET_LEN(table->locks)) { + dict_sys.remove(table, true); + } + } + } + lock_mutex_exit(); + mutex_exit(&dict_sys.mutex); +#endif } /* True if a lock mode is S or X */ @@ -4454,7 +4481,8 @@ static void lock_rec_print(FILE* file, const lock_t* lock, mtr_t& mtr) ut_ad(!page_rec_is_metadata(rec)); offsets = rec_get_offsets( - rec, lock->index, offsets, true, + rec, lock->index, offsets, + lock->index->n_core_fields, ULINT_UNDEFINED, &heap); putc(' ', file); @@ -5000,8 +5028,8 @@ loop: ut_ad(!lock_rec_get_nth_bit(lock, i) || page_rec_is_leaf(rec)); offsets = rec_get_offsets(rec, lock->index, offsets, - true, ULINT_UNDEFINED, - &heap); + lock->index->n_core_fields, + ULINT_UNDEFINED, &heap); /* If this thread is holding the file space latch (fil_space_t::latch), the following @@ -5332,7 +5360,8 @@ lock_rec_insert_check_and_lock( const rec_offs* offsets; rec_offs_init(offsets_); - offsets = rec_get_offsets(next_rec, index, offsets_, true, + offsets = rec_get_offsets(next_rec, index, offsets_, + index->n_core_fields, ULINT_UNDEFINED, &heap); ut_ad(lock_rec_queue_validate( @@ -5640,6 +5669,19 @@ lock_sec_rec_modify_check_and_lock( heap_no = page_rec_get_heap_no(rec); +#ifdef WITH_WSREP + trx_t *trx= thr_get_trx(thr); + /* If the transaction scanning a unique secondary key is a + wsrep high-priority (brute force) thread, the scan may involve + GAP-locking in the index. Because this locking also happens + when applying replication events in high-priority applier + threads, lock conflicts between two wsrep high-priority + threads are possible. To avoid such GAP-locking, we mark here + that this transaction is using a unique key scan. 
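+(The wsrep_UK_scan flag set below is consulted by
+lock_rec_has_to_wait() to let a BF transaction avoid waiting on
+another BF lock holder, and it is cleared again right after the
+lock_rec_lock() call; the same pattern is repeated in
+lock_sec_rec_read_check_and_lock().)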
*/ + if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, false)) + trx->wsrep_UK_scan= true; +#endif /* WITH_WSREP */ + /* Another transaction cannot have an implicit lock on the record, because when we come here, we already have modified the clustered index record, and this would not have been possible if another active @@ -5648,6 +5690,10 @@ lock_sec_rec_modify_check_and_lock( err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP, block, heap_no, index, thr); +#ifdef WITH_WSREP + trx->wsrep_UK_scan= false; +#endif /* WITH_WSREP */ + #ifdef UNIV_DEBUG { mem_heap_t* heap = NULL; @@ -5655,7 +5701,8 @@ ut_ad(lock_rec_queue_validate( const rec_offs* offsets; rec_offs_init(offsets_); - offsets = rec_get_offsets(rec, index, offsets_, true, + offsets = rec_get_offsets(rec, index, offsets_, + index->n_core_fields, ULINT_UNDEFINED, &heap); ut_ad(lock_rec_queue_validate( @@ -5739,9 +5786,26 @@ lock_sec_rec_read_check_and_lock( return DB_SUCCESS; } +#ifdef WITH_WSREP + trx_t *trx= thr_get_trx(thr); + /* If the transaction scanning a unique secondary key is a + wsrep high-priority (brute force) thread, the scan may involve + GAP-locking in the index. Because this locking also happens + when applying replication events in high-priority applier + threads, lock conflicts between two wsrep high-priority + threads are possible. To avoid such GAP-locking, we mark here + that this transaction is using a unique key scan. */ + if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, false)) + trx->wsrep_UK_scan= true; +#endif /* WITH_WSREP */ + err = lock_rec_lock(FALSE, ulint(mode) | gap_mode, block, heap_no, index, thr); +#ifdef WITH_WSREP + trx->wsrep_UK_scan= false; +#endif /* WITH_WSREP */ + ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets)); return(err); @@ -5850,7 +5914,7 @@ lock_clust_rec_read_check_and_lock_alt( rec_offs_init(offsets_); ut_ad(page_rec_is_leaf(rec)); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, ULINT_UNDEFINED, &tmp_heap); err = lock_clust_rec_read_check_and_lock(flags, block, rec, index, offsets, mode, gap_mode, thr); @@ -6141,6 +6205,7 @@ lock_cancel_waiting_and_release( ut_ad(lock_mutex_own()); ut_ad(trx_mutex_own(lock->trx)); + ut_ad(lock->trx->state == TRX_STATE_ACTIVE); lock->trx->lock.cancel = true; diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index d7fef4e9675..4359fb6b308 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -972,7 +972,8 @@ fail: DBUG_EXECUTE_IF("log_checksum_mismatch", { cksum = crc + 1; }); if (crc != cksum) { - ib::error() << "Invalid log block checksum." + ib::error_or_warn(srv_operation != SRV_OPERATION_BACKUP) + << "Invalid log block checksum." 
<< " block: " << block_number << " checkpoint no: " << log_block_get_checkpoint_no(buf) @@ -2303,8 +2304,6 @@ void recv_apply_hashed_log_recs(bool last_batch) recv_no_ibuf_operations = !last_batch || is_mariabackup_restore_or_export(); - ut_d(recv_no_log_write = recv_no_ibuf_operations); - if (ulint n = recv_sys.n_addrs) { if (!log_sys.log.subformat && !srv_force_recovery && srv_undo_tablespaces_open) { @@ -2391,7 +2390,7 @@ apply: /* Wait until all the pages have been processed */ - while (recv_sys.n_addrs != 0) { + while (recv_sys.n_addrs || buf_get_n_pending_read_ios()) { const bool abort = recv_sys.found_corrupt_log || recv_sys.found_corrupt_fs; @@ -3871,6 +3870,8 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) mutex_enter(&recv_sys.mutex); recv_sys.apply_log_recs = true; + recv_no_ibuf_operations = is_mariabackup_restore_or_export(); + ut_d(recv_no_log_write = recv_no_ibuf_operations); mutex_exit(&recv_sys.mutex); diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index f96ff6b5171..62908d37337 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -2,7 +2,7 @@ Copyright (c) 1995, 2019, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Percona Inc.. Those modifications are @@ -2890,7 +2890,6 @@ os_file_create_func( ut_a(type == OS_LOG_FILE || type == OS_DATA_FILE - || type == OS_DATA_TEMP_FILE || type == OS_DATA_FILE_NO_O_DIRECT); ut_a(purpose == OS_FILE_AIO || purpose == OS_FILE_NORMAL); @@ -2938,7 +2937,7 @@ os_file_create_func( /* We disable OS caching (O_DIRECT) only on data files */ if (!read_only && *success - && (type != OS_LOG_FILE && type != OS_DATA_TEMP_FILE + && (type != OS_LOG_FILE && type != OS_DATA_FILE_NO_O_DIRECT) && (srv_file_flush_method == SRV_O_DIRECT || srv_file_flush_method == SRV_O_DIRECT_NO_FSYNC)) { @@ -4137,7 +4136,9 @@ os_file_create_func( case SRV_ALL_O_DIRECT_FSYNC: /*Traditional Windows behavior, no buffering for any files.*/ - attributes |= FILE_FLAG_NO_BUFFERING; + if (type != OS_DATA_FILE_NO_O_DIRECT) { + attributes |= FILE_FLAG_NO_BUFFERING; + } break; case SRV_FSYNC: @@ -7707,7 +7708,7 @@ static bool is_file_on_ssd(char *file_path) /** Determine some file metadata when creating or reading the file. 
@param file the file that is being created, or OS_FILE_CLOSED */ void fil_node_t::find_metadata(os_file_t file -#ifdef UNIV_LINUX +#ifndef _WIN32 , struct stat* statbuf #endif ) @@ -7747,18 +7748,18 @@ void fil_node_t::find_metadata(os_file_t file block_size = 512; } #else - on_ssd = space->atomic_write_supported; -# ifdef UNIV_LINUX - if (!on_ssd) { - struct stat sbuf; - if (!statbuf && !fstat(file, &sbuf)) { - statbuf = &sbuf; - } - if (statbuf && fil_system.is_ssd(statbuf->st_dev)) { - on_ssd = true; - } + struct stat sbuf; + if (!statbuf && !fstat(file, &sbuf)) { + statbuf = &sbuf; } + if (statbuf) { + block_size = statbuf->st_blksize; + } + on_ssd = space->atomic_write_supported +# ifdef UNIV_LINUX + || (statbuf && fil_system.is_ssd(statbuf->st_dev)) # endif + ; #endif if (!space->atomic_write_supported) { space->atomic_write_supported = atomic_write @@ -7794,7 +7795,6 @@ bool fil_node_t::read_page0(bool first) if (fstat(handle, &statbuf)) { return false; } - block_size = statbuf.st_blksize; os_offset_t size_bytes = statbuf.st_size; #else os_offset_t size_bytes = os_file_get_size(handle); diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc index 0586d6d8a33..9bf9fe66b33 100644 --- a/storage/innobase/page/page0cur.cc +++ b/storage/innobase/page/page0cur.cc @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2018, 2020, MariaDB Corporation. +Copyright (c) 2018, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -75,7 +75,7 @@ page_cur_try_search_shortcut( ut_ad(page_is_leaf(page)); rec = page_header_get_ptr(page, PAGE_LAST_INSERT); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, dtuple_get_n_fields(tuple), &heap); ut_ad(rec); @@ -90,7 +90,8 @@ page_cur_try_search_shortcut( next_rec = page_rec_get_next_const(rec); if (!page_rec_is_supremum(next_rec)) { - offsets = rec_get_offsets(next_rec, index, offsets, true, + offsets = rec_get_offsets(next_rec, index, offsets, + index->n_core_fields, dtuple_get_n_fields(tuple), &heap); if (cmp_dtuple_rec_with_match(tuple, next_rec, offsets, @@ -159,7 +160,7 @@ page_cur_try_search_shortcut_bytes( ut_ad(page_is_leaf(page)); rec = page_header_get_ptr(page, PAGE_LAST_INSERT); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, dtuple_get_n_fields(tuple), &heap); ut_ad(rec); @@ -180,7 +181,8 @@ page_cur_try_search_shortcut_bytes( next_rec = page_rec_get_next_const(rec); if (!page_rec_is_supremum(next_rec)) { - offsets = rec_get_offsets(next_rec, index, offsets, true, + offsets = rec_get_offsets(next_rec, index, offsets, + index->n_core_fields, dtuple_get_n_fields(tuple), &heap); if (cmp_dtuple_rec_with_match_bytes( @@ -321,14 +323,14 @@ page_cur_search_with_match( #endif /* UNIV_ZIP_DEBUG */ ut_d(page_check_dir(page)); - const bool is_leaf = page_is_leaf(page); + const ulint n_core = page_is_leaf(page) ? 
index->n_core_fields : 0; #ifdef BTR_CUR_HASH_ADAPT - if (is_leaf + if (n_core && page_get_direction(page) == PAGE_RIGHT && page_header_get_offs(page, PAGE_LAST_INSERT) && mode == PAGE_CUR_LE - && !dict_index_is_spatial(index) + && !index->is_spatial() && page_header_get_field(page, PAGE_N_DIRECTION) > 3 && page_cur_try_search_shortcut( block, index, tuple, @@ -344,10 +346,10 @@ page_cur_search_with_match( /* If the mode is for R-tree indexes, use the special MBR related compare functions */ - if (dict_index_is_spatial(index) && mode > PAGE_CUR_LE) { + if (index->is_spatial() && mode > PAGE_CUR_LE) { /* For leaf level insert, we still use the traditional compare function for now */ - if (mode == PAGE_CUR_RTREE_INSERT && is_leaf) { + if (mode == PAGE_CUR_RTREE_INSERT && n_core) { mode = PAGE_CUR_LE; } else { rtr_cur_search_with_match( @@ -392,7 +394,7 @@ page_cur_search_with_match( offsets = offsets_; offsets = rec_get_offsets( - mid_rec, index, offsets, is_leaf, + mid_rec, index, offsets, n_core, dtuple_get_n_fields_cmp(tuple), &heap); cmp = cmp_dtuple_rec_with_match( @@ -446,7 +448,7 @@ up_slot_match: offsets = offsets_; offsets = rec_get_offsets( - mid_rec, index, offsets, is_leaf, + mid_rec, index, offsets, n_core, dtuple_get_n_fields_cmp(tuple), &heap); cmp = cmp_dtuple_rec_with_match( @@ -627,7 +629,7 @@ page_cur_search_with_match_bytes( /* Perform binary search until the lower and upper limit directory slots come to the distance 1 of each other */ - const bool is_leaf = page_is_leaf(page); + const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0; while (up - low > 1) { mid = (low + up) / 2; @@ -639,7 +641,7 @@ page_cur_search_with_match_bytes( up_matched_fields, up_matched_bytes); offsets = rec_get_offsets( - mid_rec, index, offsets_, is_leaf, + mid_rec, index, offsets_, n_core, dtuple_get_n_fields_cmp(tuple), &heap); cmp = cmp_dtuple_rec_with_match_bytes( @@ -707,7 +709,7 @@ up_slot_match: } offsets = rec_get_offsets( - mid_rec, index, offsets_, is_leaf, + mid_rec, index, offsets_, n_core, dtuple_get_n_fields_cmp(tuple), &heap); cmp = cmp_dtuple_rec_with_match_bytes( @@ -817,7 +819,8 @@ page_cur_insert_rec_write_log( ut_ad(!page_rec_is_comp(insert_rec) == !dict_table_is_comp(index->table)); - const bool is_leaf = page_rec_is_leaf(cursor_rec); + const ulint n_core = page_rec_is_leaf(cursor_rec) + ? index->n_core_fields : 0; { mem_heap_t* heap = NULL; @@ -831,9 +834,9 @@ page_cur_insert_rec_write_log( rec_offs_init(ins_offs_); cur_offs = rec_get_offsets(cursor_rec, index, cur_offs_, - is_leaf, ULINT_UNDEFINED, &heap); + n_core, ULINT_UNDEFINED, &heap); ins_offs = rec_get_offsets(insert_rec, index, ins_offs_, - is_leaf, ULINT_UNDEFINED, &heap); + n_core, ULINT_UNDEFINED, &heap); extra_size = rec_offs_extra_size(ins_offs); cur_extra_size = rec_offs_extra_size(cur_offs); @@ -1091,9 +1094,9 @@ page_cur_parse_insert_rec( /* Read from the log the inserted index record end segment which differs from the cursor record */ - const bool is_leaf = page_is_leaf(page); + const ulint n_core = page_is_leaf(page) ? 
index->n_core_fields : 0; - offsets = rec_get_offsets(cursor_rec, index, offsets, is_leaf, + offsets = rec_get_offsets(cursor_rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); if (!(end_seg_len & 0x1UL)) { @@ -1142,7 +1145,7 @@ page_cur_parse_insert_rec( page_cur_position(cursor_rec, block, &cursor); offsets = rec_get_offsets(buf + origin_offset, index, offsets, - is_leaf, ULINT_UNDEFINED, &heap); + n_core, ULINT_UNDEFINED, &heap); if (UNIV_UNLIKELY(!page_cur_rec_insert(&cursor, buf + origin_offset, index, offsets, mtr))) { @@ -1323,7 +1326,8 @@ page_cur_insert_rec_low( rec_offs_init(foffsets_); foffsets = rec_get_offsets( - free_rec, index, foffsets, page_is_leaf(page), + free_rec, index, foffsets, + page_is_leaf(page) ? index->n_core_fields : 0, ULINT_UNDEFINED, &heap); if (rec_offs_size(foffsets) < rec_size) { if (UNIV_LIKELY_NULL(heap)) { @@ -1736,7 +1740,8 @@ page_cur_insert_rec_zip( rec_offs_init(foffsets_); foffsets = rec_get_offsets(free_rec, index, foffsets, - page_rec_is_leaf(free_rec), + page_rec_is_leaf(free_rec) + ? index->n_core_fields : 0, ULINT_UNDEFINED, &heap); if (rec_offs_size(foffsets) < rec_size) { too_small: @@ -2097,10 +2102,11 @@ page_copy_rec_list_end_to_created_page( slot_index = 0; n_recs = 0; - const bool is_leaf = page_is_leaf(new_page); + const ulint n_core = page_is_leaf(new_page) + ? index->n_core_fields : 0; do { - offsets = rec_get_offsets(rec, index, offsets, is_leaf, + offsets = rec_get_offsets(rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); insert_rec = rec_copy(heap_top, rec, offsets); @@ -2142,7 +2148,7 @@ page_copy_rec_list_end_to_created_page( heap_top += rec_size; - rec_offs_make_valid(insert_rec, index, is_leaf, offsets); + rec_offs_make_valid(insert_rec, index, n_core != 0, offsets); page_cur_insert_rec_write_log(insert_rec, rec_size, prev_rec, index, mtr); prev_rec = insert_rec; @@ -2279,7 +2285,8 @@ page_cur_parse_delete_rec( page_cur_delete_rec(&cursor, index, rec_get_offsets(rec, index, offsets_, - page_rec_is_leaf(rec), + page_rec_is_leaf(rec) + ? index->n_core_fields : 0, ULINT_UNDEFINED, &heap), mtr); if (UNIV_LIKELY_NULL(heap)) { diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc index ae2cf1870e1..fc33b38beda 100644 --- a/storage/innobase/page/page0page.cc +++ b/storage/innobase/page/page0page.cc @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -540,7 +540,8 @@ page_copy_rec_list_end_no_locks( ut_a(page_is_comp(new_page) == page_rec_is_comp(rec)); ut_a(mach_read_from_2(new_page + srv_page_size - 10) == (ulint) (page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM)); - const bool is_leaf = page_is_leaf(block->frame); + const ulint n_core = page_is_leaf(block->frame) + ? 
index->n_core_fields : 0; cur2 = page_get_infimum_rec(buf_block_get_frame(new_block)); @@ -548,7 +549,7 @@ page_copy_rec_list_end_no_locks( while (!page_cur_is_after_last(&cur1)) { rec_t* ins_rec; - offsets = rec_get_offsets(cur1.rec, index, offsets, is_leaf, + offsets = rec_get_offsets(cur1.rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); ins_rec = page_cur_insert_rec_low(cur2, index, cur1.rec, offsets, mtr); @@ -777,7 +778,7 @@ page_copy_rec_list_start( cur2 = ret; - const bool is_leaf = page_rec_is_leaf(rec); + const ulint n_core = page_rec_is_leaf(rec) ? index->n_core_fields : 0; /* Copy records from the original page to the new page */ if (index->is_spatial()) { @@ -799,7 +800,7 @@ page_copy_rec_list_start( } else { while (page_cur_get_rec(&cur1) != rec) { offsets = rec_get_offsets(cur1.rec, index, offsets, - is_leaf, + n_core, ULINT_UNDEFINED, &heap); cur2 = page_cur_insert_rec_low(cur2, index, cur1.rec, offsets, mtr); @@ -819,7 +820,7 @@ page_copy_rec_list_start( same temp-table in parallel. max_trx_id is ignored for temp tables because it not required for MVCC. */ - if (is_leaf && dict_index_is_sec_or_ibuf(index) + if (n_core && dict_index_is_sec_or_ibuf(index) && !index->table->is_temporary()) { page_update_max_trx_id(new_block, NULL, page_get_max_trx_id(page_align(rec)), @@ -1050,7 +1051,7 @@ delete_all: ? MLOG_COMP_LIST_END_DELETE : MLOG_LIST_END_DELETE, mtr); - const bool is_leaf = page_is_leaf(page); + const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0; if (page_zip) { mtr_log_t log_mode; @@ -1064,7 +1065,7 @@ delete_all: page_cur_t cur; page_cur_position(rec, block, &cur); - offsets = rec_get_offsets(rec, index, offsets, is_leaf, + offsets = rec_get_offsets(rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); rec = rec_get_next_ptr(rec, TRUE); #ifdef UNIV_ZIP_DEBUG @@ -1097,8 +1098,7 @@ delete_all: do { ulint s; - offsets = rec_get_offsets(rec2, index, offsets, - is_leaf, + offsets = rec_get_offsets(rec2, index, offsets, n_core, ULINT_UNDEFINED, &heap); s = rec_offs_size(offsets); ut_ad(ulint(rec2 - page) + s @@ -1244,11 +1244,12 @@ page_delete_rec_list_start( /* Individual deletes are not logged */ mtr_log_t log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE); - const bool is_leaf = page_rec_is_leaf(rec); + const ulint n_core = page_rec_is_leaf(rec) + ? index->n_core_fields : 0; while (page_cur_get_rec(&cur1) != rec) { offsets = rec_get_offsets(page_cur_get_rec(&cur1), index, - offsets, is_leaf, + offsets, n_core, ULINT_UNDEFINED, &heap); page_cur_delete_rec(&cur1, index, offsets, mtr); } @@ -2461,9 +2462,10 @@ wrong_page_type: rec = page_get_infimum_rec(page); + const ulint n_core = page_is_leaf(page) ? index->n_core_fields : 0; + for (;;) { - offsets = rec_get_offsets(rec, index, offsets, - page_is_leaf(page), + offsets = rec_get_offsets(rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); if (page_is_comp(page) && page_rec_is_user_rec(rec) @@ -2709,8 +2711,7 @@ n_owned_zero: rec = page_header_get_ptr(page, PAGE_FREE); while (rec != NULL) { - offsets = rec_get_offsets(rec, index, offsets, - page_is_leaf(page), + offsets = rec_get_offsets(rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); if (UNIV_UNLIKELY(!page_rec_validate(rec, offsets))) { ret = FALSE; diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc index eb94aad207c..111a400ec92 100644 --- a/storage/innobase/page/page0zip.cc +++ b/storage/innobase/page/page0zip.cc @@ -2,7 +2,7 @@ Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved. 
Copyright (c) 2012, Facebook Inc. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -877,7 +877,7 @@ page_zip_compress_node_ptrs( do { const rec_t* rec = *recs++; - offsets = rec_get_offsets(rec, index, offsets, false, + offsets = rec_get_offsets(rec, index, offsets, 0, ULINT_UNDEFINED, &heap); /* Only leaf nodes may contain externally stored columns. */ ut_ad(!rec_offs_any_extern(offsets)); @@ -1126,7 +1126,7 @@ page_zip_compress_clust( do { const rec_t* rec = *recs++; - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_fields, ULINT_UNDEFINED, &heap); ut_ad(rec_offs_n_fields(offsets) == dict_index_get_n_fields(index)); @@ -1349,33 +1349,6 @@ page_zip_compress( MONITOR_INC(MONITOR_PAGE_COMPRESS); - /* Simulate a compression failure with a probability determined by - innodb_simulate_comp_failures, only if the page has 2 or more - records. */ - - if (srv_simulate_comp_failures - && !dict_index_is_ibuf(index) - && page_get_n_recs(page) >= 2 - && ((ulint)(rand() % 100) < srv_simulate_comp_failures) - && strcmp(index->table->name.m_name, "IBUF_DUMMY")) { - -#ifdef UNIV_DEBUG - ib::error() - << "Simulating a compression failure" - << " for table " << index->table->name - << " index " - << index->name() - << " page " - << page_get_page_no(page) - << "(" - << (page_is_leaf(page) ? "leaf" : "non-leaf") - << ")"; - -#endif - - goto err_exit; - } - heap = mem_heap_create(page_zip_get_size(page_zip) + n_fields * (2 + sizeof(ulint)) + REC_OFFS_HEADER_SIZE @@ -2032,7 +2005,7 @@ page_zip_apply_log( sorted by address (indexed by heap_no - PAGE_HEAP_NO_USER_LOW) */ ulint n_dense,/*!< in: size of recs[] */ - bool is_leaf,/*!< in: whether this is a leaf page */ + ulint n_core, /*!< in: index->n_fields, or 0 for non-leaf */ ulint trx_id_col,/*!< in: column number of trx_id in the index, or ULINT_UNDEFINED if none */ ulint heap_status, @@ -2108,7 +2081,7 @@ page_zip_apply_log( /* Clear the data bytes of the record. */ mem_heap_t* heap = NULL; rec_offs* offs; - offs = rec_get_offsets(rec, index, offsets, is_leaf, + offs = rec_get_offsets(rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); memset(rec, 0, rec_offs_data_size(offs)); @@ -2126,7 +2099,7 @@ page_zip_apply_log( This will be overwritten in page_zip_set_extra_bytes(), called by page_zip_decompress_low(). */ ut_d(rec[-REC_NEW_INFO_BITS] = 0); - rec_offs_make_valid(rec, index, is_leaf, offsets); + rec_offs_make_valid(rec, index, n_core != 0, offsets); /* Copy the extra bytes (backwards). */ { @@ -2306,7 +2279,7 @@ page_zip_decompress_node_ptrs( } /* Read the offsets. The status bits are needed here. 
*/ - offsets = rec_get_offsets(rec, index, offsets, false, + offsets = rec_get_offsets(rec, index, offsets, 0, ULINT_UNDEFINED, &heap); /* Non-leaf nodes should not have any externally @@ -2393,7 +2366,7 @@ zlib_done: const byte* mod_log_ptr; mod_log_ptr = page_zip_apply_log(d_stream->next_in, d_stream->avail_in + 1, - recs, n_dense, false, + recs, n_dense, 0, ULINT_UNDEFINED, heap_status, index, offsets); @@ -2424,7 +2397,7 @@ zlib_done: for (slot = 0; slot < n_dense; slot++) { rec_t* rec = recs[slot]; - offsets = rec_get_offsets(rec, index, offsets, false, + offsets = rec_get_offsets(rec, index, offsets, 0, ULINT_UNDEFINED, &heap); /* Non-leaf nodes should not have any externally stored columns. */ @@ -2546,7 +2519,8 @@ zlib_done: const byte* mod_log_ptr; mod_log_ptr = page_zip_apply_log(d_stream->next_in, d_stream->avail_in + 1, - recs, n_dense, true, + recs, n_dense, + index->n_fields, ULINT_UNDEFINED, heap_status, index, offsets); @@ -2749,7 +2723,7 @@ page_zip_decompress_clust( } /* Read the offsets. The status bits are needed here. */ - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_fields, ULINT_UNDEFINED, &heap); /* This is a leaf page in a clustered index. */ @@ -2876,7 +2850,8 @@ zlib_done: const byte* mod_log_ptr; mod_log_ptr = page_zip_apply_log(d_stream->next_in, d_stream->avail_in + 1, - recs, n_dense, true, + recs, n_dense, + index->n_fields, trx_id_col, heap_status, index, offsets); @@ -2912,7 +2887,7 @@ zlib_done: rec_t* rec = recs[slot]; bool exists = !page_zip_dir_find_free( page_zip, page_offset(rec)); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_fields, ULINT_UNDEFINED, &heap); dst = rec_get_nth_field(rec, offsets, @@ -3436,7 +3411,7 @@ page_zip_validate_low( page + PAGE_NEW_INFIMUM, TRUE); trec = page_rec_get_next_low( temp_page + PAGE_NEW_INFIMUM, TRUE); - const bool is_leaf = page_is_leaf(page); + const ulint n_core = page_is_leaf(page) ? index->n_fields : 0; do { if (page_offset(rec) != page_offset(trec)) { @@ -3451,7 +3426,7 @@ page_zip_validate_low( if (index) { /* Compare the data. */ offsets = rec_get_offsets( - rec, index, offsets, is_leaf, + rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); if (memcmp(rec - rec_offs_extra_size(offsets), diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc index ebfe7ada3b1..a27411c3d8b 100644 --- a/storage/innobase/pars/pars0pars.cc +++ b/storage/innobase/pars/pars0pars.cc @@ -1219,6 +1219,7 @@ pars_update_statement( sel_node->row_lock_mode = LOCK_X; } else { node->has_clust_rec_x_lock = sel_node->set_x_locks; + ut_ad(node->has_clust_rec_x_lock); } ut_a(sel_node->n_tables == 1); diff --git a/storage/innobase/que/que0que.cc b/storage/innobase/que/que0que.cc index 3f4810dcc0e..e98d50ea0fc 100644 --- a/storage/innobase/que/que0que.cc +++ b/storage/innobase/que/que0que.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2018, 2020 MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -349,7 +349,6 @@ que_fork_start_command( case QUE_THR_RUNNING: case QUE_THR_LOCK_WAIT: - case QUE_THR_PROCEDURE_WAIT: ut_error; } } diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc index b3c2fc84231..581637be073 100644 --- a/storage/innobase/rem/rem0rec.cc +++ b/storage/innobase/rem/rem0rec.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -29,6 +29,7 @@ Created 5/30/1994 Heikki Tuuri #include "mtr0log.h" #include "fts0fts.h" #include "trx0sys.h" +#include "row0log.h" /* PHYSICAL RECORD (OLD STYLE) =========================== @@ -272,9 +273,9 @@ rec_init_offsets_comp_ordinary( ulint n_fields = n_core; ulint null_mask = 1; - ut_ad(index->n_core_fields >= n_core); ut_ad(n_core > 0); - ut_ad(index->n_fields >= n_core); + ut_ad(index->n_core_fields >= n_core); + ut_ad(index->n_fields >= index->n_core_fields); ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable)); ut_ad(format == REC_LEAF_TEMP || format == REC_LEAF_TEMP_INSTANT || dict_table_is_comp(index->table)); @@ -282,6 +283,11 @@ rec_init_offsets_comp_ordinary( || index->n_fields == rec_offs_n_fields(offsets)); ut_d(ulint n_null= 0); + const unsigned n_core_null_bytes = UNIV_UNLIKELY(index->n_core_fields + != n_core) + ? UT_BITS_IN_BYTES(unsigned(index->get_n_nullable(n_core))) + : index->n_core_null_bytes; + if (mblob) { ut_ad(index->is_dummy || index->table->instant); ut_ad(index->is_dummy || index->is_instant()); @@ -296,7 +302,7 @@ rec_init_offsets_comp_ordinary( const ulint n_null_bytes = UT_BITS_IN_BYTES(n_nullable); ut_d(n_null = n_nullable); ut_ad(n_null <= index->n_nullable); - ut_ad(n_null_bytes >= index->n_core_null_bytes + ut_ad(n_null_bytes >= n_core_null_bytes || n_core < index->n_core_fields); lens = --nulls - n_null_bytes; goto start; @@ -313,9 +319,9 @@ rec_init_offsets_comp_ordinary( case REC_LEAF_ORDINARY: nulls -= REC_N_NEW_EXTRA_BYTES; ordinary: - lens = --nulls - index->n_core_null_bytes; + lens = --nulls - n_core_null_bytes; - ut_d(n_null = std::min<uint>(index->n_core_null_bytes * 8U, + ut_d(n_null = std::min<uint>(n_core_null_bytes * 8U, index->n_nullable)); break; case REC_LEAF_INSTANT: @@ -329,7 +335,7 @@ ordinary: const ulint n_null_bytes = UT_BITS_IN_BYTES(n_nullable); ut_d(n_null = n_nullable); ut_ad(n_null <= index->n_nullable); - ut_ad(n_null_bytes >= index->n_core_null_bytes + ut_ad(n_null_bytes >= n_core_null_bytes || n_core < index->n_core_fields); lens = --nulls - n_null_bytes; } @@ -583,14 +589,14 @@ is (SQL_NULL), the field i is NULL. When the type of the offset at [i+1] is (STORED_OFFPAGE), the field i is stored externally. 
@param[in] rec record @param[in] index the index that the record belongs in -@param[in] leaf whether the record resides in a leaf page +@param[in] n_core 0, or index->n_core_fields for leaf page @param[in,out] offsets array of offsets, with valid rec_offs_n_fields() */ static void rec_init_offsets( const rec_t* rec, const dict_index_t* index, - bool leaf, + ulint n_core, rec_offs* offsets) { ulint i = 0; @@ -605,6 +611,8 @@ rec_init_offsets( || index->in_instant_init); ut_d(memcpy(&offsets[RECORD_OFFSET], &rec, sizeof(rec))); ut_d(memcpy(&offsets[INDEX_OFFSET], &index, sizeof(index))); + ut_ad(index->n_fields >= n_core); + ut_ad(index->n_core_fields >= n_core); if (dict_table_is_comp(index->table)) { const byte* nulls; @@ -623,23 +631,21 @@ rec_init_offsets( rec_offs_base(offsets)[1] = 8; return; case REC_STATUS_NODE_PTR: - ut_ad(!leaf); + ut_ad(!n_core); n_node_ptr_field = dict_index_get_n_unique_in_tree_nonleaf( index); break; case REC_STATUS_INSTANT: - ut_ad(leaf); ut_ad(index->is_instant()); rec_init_offsets_comp_ordinary(rec, index, offsets, - index->n_core_fields, + n_core, NULL, REC_LEAF_INSTANT); return; case REC_STATUS_ORDINARY: - ut_ad(leaf); rec_init_offsets_comp_ordinary(rec, index, offsets, - index->n_core_fields, + n_core, NULL, REC_LEAF_ORDINARY); return; @@ -796,7 +802,7 @@ resolved: @param[in] index the index that the record belongs to @param[in,out] offsets array comprising offsets[0] allocated elements, or an array from rec_get_offsets(), or NULL -@param[in] leaf whether this is a leaf-page record +@param[in] n_core 0, or index->n_core_fields for leaf page @param[in] n_fields maximum number of offsets to compute (ULINT_UNDEFINED to compute all offsets) @param[in,out] heap memory heap @@ -806,7 +812,7 @@ rec_get_offsets_func( const rec_t* rec, const dict_index_t* index, rec_offs* offsets, - bool leaf, + ulint n_core, ulint n_fields, #ifdef UNIV_DEBUG const char* file, /*!< in: file name where called */ @@ -818,6 +824,15 @@ rec_get_offsets_func( ulint size; bool alter_metadata = false; + ut_ad(index->n_core_fields >= n_core); + /* This assertion was relaxed for the btr_cur_open_at_index_side() + call in btr_cur_instant_init_low(). We cannot invoke + index->is_instant(), because the same assertion would fail there + until btr_cur_instant_init_low() has invoked + dict_table_t::deserialise_columns(). */ + ut_ad(index->n_fields >= index->n_core_fields + || index->in_instant_init); + if (dict_table_is_comp(index->table)) { switch (UNIV_EXPECT(rec_get_status(rec), REC_STATUS_ORDINARY)) { @@ -825,14 +840,14 @@ rec_get_offsets_func( alter_metadata = rec_is_alter_metadata(rec, true); /* fall through */ case REC_STATUS_ORDINARY: - ut_ad(leaf); + ut_ad(n_core); n = dict_index_get_n_fields(index) + alter_metadata; break; case REC_STATUS_NODE_PTR: /* Node pointer records consist of the uniquely identifying fields of the record followed by a child page number field. */ - ut_ad(!leaf); + ut_ad(!n_core); n = dict_index_get_n_unique_in_tree_nonleaf(index) + 1; break; case REC_STATUS_INFIMUM: @@ -861,19 +876,19 @@ rec_get_offsets_func( >= PAGE_HEAP_NO_USER_LOW; /* The infimum and supremum records carry 1 field. 
*/ ut_ad(is_user_rec || n == 1); - ut_ad(!is_user_rec || leaf || index->is_dummy + ut_ad(!is_user_rec || n_core || index->is_dummy || dict_index_is_ibuf(index) || n == n_fields /* dict_stats_analyze_index_level() */ || n == dict_index_get_n_unique_in_tree_nonleaf(index) + 1); - ut_ad(!is_user_rec || !leaf || index->is_dummy + ut_ad(!is_user_rec || !n_core || index->is_dummy || dict_index_is_ibuf(index) || n == n_fields /* btr_pcur_restore_position() */ || (n + (index->id == DICT_INDEXES_ID) - >= index->n_core_fields && n <= index->n_fields + >= n_core && n <= index->n_fields + unsigned(rec_is_alter_metadata(rec, false)))); - if (is_user_rec && leaf && n < index->n_fields) { + if (is_user_rec && n_core && n < index->n_fields) { ut_ad(!index->is_dummy); ut_ad(!dict_index_is_ibuf(index)); n = index->n_fields; @@ -907,17 +922,17 @@ rec_get_offsets_func( memcpy(&offsets[RECORD_OFFSET], &rec, sizeof rec); memcpy(&offsets[INDEX_OFFSET], &index, sizeof index); #endif /* UNIV_DEBUG */ - ut_ad(leaf); + ut_ad(n_core); ut_ad(index->is_dummy || index->table->instant); ut_ad(index->is_dummy || index->is_instant()); ut_ad(rec_offs_n_fields(offsets) <= ulint(index->n_fields) + 1); rec_init_offsets_comp_ordinary<true>(rec, index, offsets, index->n_core_fields, - NULL, + nullptr, REC_LEAF_INSTANT); } else { - rec_init_offsets(rec, index, leaf, offsets); + rec_init_offsets(rec, index, n_core, offsets); } return offsets; } @@ -1094,7 +1109,8 @@ rec_get_nth_field_offs_old( } /** Determine the size of a data tuple prefix in ROW_FORMAT=COMPACT. -@tparam mblob whether the record includes a metadata BLOB +@tparam mblob whether the record includes a metadata BLOB +@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format @param[in] index record descriptor; dict_table_is_comp() is assumed to hold, even if it doesn't @param[in] dfield array of data fields @@ -1103,7 +1119,7 @@ rec_get_nth_field_offs_old( @param[in] status status flags @param[in] temp whether this is a temporary file record @return total size */ -template<bool mblob = false> +template<bool mblob = false, bool redundant_temp = false> static inline ulint rec_get_converted_size_comp_prefix_low( @@ -1120,25 +1136,27 @@ rec_get_converted_size_comp_prefix_low( ut_d(ulint n_null = index->n_nullable); ut_ad(status == REC_STATUS_ORDINARY || status == REC_STATUS_NODE_PTR || status == REC_STATUS_INSTANT); + unsigned n_core_fields = redundant_temp + ? 
row_log_get_n_core_fields(index) + : index->n_core_fields; if (mblob) { - ut_ad(!temp); ut_ad(index->table->instant); - ut_ad(index->is_instant()); + ut_ad(!redundant_temp && index->is_instant()); ut_ad(status == REC_STATUS_INSTANT); ut_ad(n_fields == ulint(index->n_fields) + 1); extra_size += UT_BITS_IN_BYTES(index->n_nullable) + rec_get_n_add_field_len(n_fields - 1 - - index->n_core_fields); + - n_core_fields); } else if (status == REC_STATUS_INSTANT - && (!temp || n_fields > index->n_core_fields)) { - ut_ad(index->is_instant()); + && (!temp || n_fields > n_core_fields)) { + if (!redundant_temp) { ut_ad(index->is_instant()); } ut_ad(UT_BITS_IN_BYTES(n_null) >= index->n_core_null_bytes); extra_size += UT_BITS_IN_BYTES(index->get_n_nullable(n_fields)) + rec_get_n_add_field_len(n_fields - 1 - - index->n_core_fields); + - n_core_fields); } else { - ut_ad(n_fields <= index->n_core_fields); + ut_ad(n_fields <= n_core_fields); extra_size += index->n_core_null_bytes; } @@ -1442,8 +1460,9 @@ rec_convert_dtuple_to_rec_old( /* If the data is not SQL null, store it */ len = dfield_get_len(field); - memcpy(rec + end_offset, - dfield_get_data(field), len); + if (len) + memcpy(rec + end_offset, + dfield_get_data(field), len); end_offset += len; ored_offset = end_offset; @@ -1470,8 +1489,9 @@ rec_convert_dtuple_to_rec_old( /* If the data is not SQL null, store it */ len = dfield_get_len(field); - memcpy(rec + end_offset, - dfield_get_data(field), len); + if (len) + memcpy(rec + end_offset, + dfield_get_data(field), len); end_offset += len; ored_offset = end_offset; @@ -1489,7 +1509,8 @@ rec_convert_dtuple_to_rec_old( } /** Convert a data tuple into a ROW_FORMAT=COMPACT record. -@tparam mblob whether the record includes a metadata BLOB +@tparam mblob whether the record includes a metadata BLOB +@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format @param[out] rec converted record @param[in] index index @param[in] field data fields to convert @@ -1497,7 +1518,7 @@ rec_convert_dtuple_to_rec_old( @param[in] status rec_get_status(rec) @param[in] temp whether to use the format for temporary files in index creation */ -template<bool mblob = false> +template<bool mblob = false, bool redundant_temp = false> static inline void rec_convert_dtuple_to_rec_comp( @@ -1514,7 +1535,9 @@ rec_convert_dtuple_to_rec_comp( byte* UNINIT_VAR(lens); ulint UNINIT_VAR(n_node_ptr_field); ulint null_mask = 1; - + const ulint n_core_fields = redundant_temp + ? 
row_log_get_n_core_fields(index) + : index->n_core_fields; ut_ad(n_fields > 0); ut_ad(temp || dict_table_is_comp(index->table)); ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable)); @@ -1524,11 +1547,10 @@ rec_convert_dtuple_to_rec_comp( if (mblob) { ut_ad(!temp); ut_ad(index->table->instant); - ut_ad(index->is_instant()); + ut_ad(!redundant_temp && index->is_instant()); ut_ad(status == REC_STATUS_INSTANT); ut_ad(n_fields == ulint(index->n_fields) + 1); - rec_set_n_add_field(nulls, n_fields - 1 - - index->n_core_fields); + rec_set_n_add_field(nulls, n_fields - 1 - n_core_fields); rec_set_heap_no_new(rec, PAGE_HEAP_NO_USER_LOW); rec_set_status(rec, REC_STATUS_INSTANT); n_node_ptr_field = ULINT_UNDEFINED; @@ -1537,20 +1559,17 @@ rec_convert_dtuple_to_rec_comp( } switch (status) { case REC_STATUS_INSTANT: - ut_ad(index->is_instant()); - ut_ad(n_fields > index->n_core_fields); - rec_set_n_add_field(nulls, n_fields - 1 - - index->n_core_fields); + if (!redundant_temp) { ut_ad(index->is_instant()); } + ut_ad(n_fields > n_core_fields); + rec_set_n_add_field(nulls, n_fields - 1 - n_core_fields); /* fall through */ case REC_STATUS_ORDINARY: ut_ad(n_fields <= dict_index_get_n_fields(index)); if (!temp) { rec_set_heap_no_new(rec, PAGE_HEAP_NO_USER_LOW); - - rec_set_status( - rec, n_fields == index->n_core_fields - ? REC_STATUS_ORDINARY - : REC_STATUS_INSTANT); + rec_set_status(rec, n_fields == n_core_fields + ? REC_STATUS_ORDINARY + : REC_STATUS_INSTANT); } if (dict_table_is_comp(index->table)) { @@ -1768,12 +1787,14 @@ rec_convert_dtuple_to_rec( } /** Determine the size of a data tuple prefix in a temporary file. +@tparam redundant_temp whether to use the ROW_FORMAT=REDUNDANT format @param[in] index clustered or secondary index @param[in] fields data fields @param[in] n_fields number of data fields @param[out] extra record header size @param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT @return total size, in bytes */ +template<bool redundant_temp> ulint rec_get_converted_size_temp( const dict_index_t* index, @@ -1782,10 +1803,18 @@ rec_get_converted_size_temp( ulint* extra, rec_comp_status_t status) { - return rec_get_converted_size_comp_prefix_low( + return rec_get_converted_size_comp_prefix_low<false,redundant_temp>( index, fields, n_fields, extra, status, true); } +template ulint rec_get_converted_size_temp<false>( + const dict_index_t*, const dfield_t*, ulint, ulint*, + rec_comp_status_t); + +template ulint rec_get_converted_size_temp<true>( + const dict_index_t*, const dfield_t*, ulint, ulint*, + rec_comp_status_t); + /** Determine the offset to each field in temporary file. 
@param[in] rec temporary file record @param[in] index index of that the record belongs to @@ -1838,6 +1867,7 @@ rec_init_offsets_temp( @param[in] n_fields number of data fields @param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT */ +template<bool redundant_temp> void rec_convert_dtuple_to_temp( rec_t* rec, @@ -1846,15 +1876,25 @@ rec_convert_dtuple_to_temp( ulint n_fields, rec_comp_status_t status) { - rec_convert_dtuple_to_rec_comp(rec, index, fields, n_fields, - status, true); + rec_convert_dtuple_to_rec_comp<false,redundant_temp>( + rec, index, fields, n_fields, status, true); } +template void rec_convert_dtuple_to_temp<false>( + rec_t*, const dict_index_t*, const dfield_t*, + ulint, rec_comp_status_t); + +template void rec_convert_dtuple_to_temp<true>( + rec_t*, const dict_index_t*, const dfield_t*, + ulint, rec_comp_status_t); + /** Copy the first n fields of a (copy of a) physical record to a data tuple. The fields are copied into the memory heap. @param[out] tuple data tuple @param[in] rec index record, or a copy thereof -@param[in] is_leaf whether rec is a leaf page record +@param[in] index index of rec +@param[in] n_core index->n_core_fields at the time rec was + copied, or 0 if non-leaf page record @param[in] n_fields number of fields to copy @param[in,out] heap memory heap */ void @@ -1862,7 +1902,7 @@ rec_copy_prefix_to_dtuple( dtuple_t* tuple, const rec_t* rec, const dict_index_t* index, - bool is_leaf, + ulint n_core, ulint n_fields, mem_heap_t* heap) { @@ -1870,10 +1910,11 @@ rec_copy_prefix_to_dtuple( rec_offs* offsets = offsets_; rec_offs_init(offsets_); - ut_ad(is_leaf || n_fields + ut_ad(n_core <= index->n_core_fields); + ut_ad(n_core || n_fields <= dict_index_get_n_unique_in_tree_nonleaf(index) + 1); - offsets = rec_get_offsets(rec, index, offsets, is_leaf, + offsets = rec_get_offsets(rec, index, offsets, n_core, n_fields, &heap); ut_ad(rec_validate(rec, offsets)); @@ -2513,7 +2554,8 @@ rec_print( rec_print_new(file, rec, rec_get_offsets(rec, index, offsets_, - page_rec_is_leaf(rec), + page_rec_is_leaf(rec) + ? index->n_core_fields : 0, ULINT_UNDEFINED, &heap)); if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); @@ -2589,7 +2631,8 @@ operator<<(std::ostream& o, const rec_index_print& r) { mem_heap_t* heap = NULL; rec_offs* offsets = rec_get_offsets( - r.m_rec, r.m_index, NULL, page_rec_is_leaf(r.m_rec), + r.m_rec, r.m_index, NULL, page_rec_is_leaf(r.m_rec) + ? 
r.m_index->n_core_fields : 0, ULINT_UNDEFINED, &heap); rec_print(o, r.m_rec, rec_get_info_bits(r.m_rec, rec_offs_comp(offsets)), @@ -2628,7 +2671,7 @@ rec_get_trx_id( rec_offs_init(offsets_); rec_offs* offsets = offsets_; - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, index->db_trx_id() + 1, &heap); trx_id = rec_get_nth_field(rec, offsets, index->db_trx_id(), &len); @@ -2679,7 +2722,8 @@ wsrep_rec_get_foreign_key( ut_ad(index_ref); rec_offs_init(offsets_); - offsets = rec_get_offsets(rec, index_for, offsets_, true, + offsets = rec_get_offsets(rec, index_for, offsets_, + index_for->n_core_fields, ULINT_UNDEFINED, &heap); ut_ad(rec_offs_validate(rec, NULL, offsets)); @@ -2761,9 +2805,24 @@ wsrep_rec_get_foreign_key( break; case DATA_BLOB: case DATA_BINARY: + case DATA_FIXBINARY: + case DATA_GEOMETRY: memcpy(buf, data, len); break; - default: + + case DATA_FLOAT: + { + float f = mach_float_read(data); + memcpy(buf, &f, sizeof(float)); + } + break; + case DATA_DOUBLE: + { + double d = mach_double_read(data); + memcpy(buf, &d, sizeof(double)); + } + break; + default: break; } diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc index f8f751fa746..ae6e6d05d80 100644 --- a/storage/innobase/row/row0ftsort.cc +++ b/storage/innobase/row/row0ftsort.cc @@ -925,7 +925,7 @@ loop: << " records, the sort queue has " << UT_LIST_GET_LEN(psort_info->fts_doc_list) << " records. But sort cannot get the next" - " records"; + " records during alter table " << table->name; goto exit; } } else if (psort_info->state == FTS_PARENT_EXITING) { @@ -1221,7 +1221,9 @@ row_merge_write_fts_word( if (UNIV_UNLIKELY(error != DB_SUCCESS)) { ib::error() << "Failed to write word to FTS auxiliary" - " index table, error " << error; + " index table " + << ins_ctx->btr_bulk->table_name() + << ", error " << error; ret = error; } diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 2468b133387..7c56713a6c1 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1818,7 +1818,8 @@ PageConverter::update_records( if (deleted || clust_index) { m_offsets = rec_get_offsets( - rec, m_index->m_srv_index, m_offsets, true, + rec, m_index->m_srv_index, m_offsets, + m_index->m_srv_index->n_core_fields, ULINT_UNDEFINED, &m_heap); } @@ -3351,6 +3352,66 @@ struct fil_iterator_t { byte* crypt_io_buffer; /*!< IO buffer when encrypted */ }; + +/** InnoDB writes page by page when there is page compressed +tablespace involved. 
It helps to save disk space when
+punch hole is enabled.
+@param iter Tablespace iterator
+@param full_crc32 whether the file is in the full_crc32 format
+@param write_request Request to write into the file
+@param offset offset of the file to be written
+@param writeptr buffer to be written
+@param n_bytes number of bytes to be written
+@param try_punch_only Try the range punch only because the
+ current range is full of empty pages
+@return DB_SUCCESS */
+static
+dberr_t fil_import_compress_fwrite(const fil_iterator_t &iter,
+ bool full_crc32,
+ const IORequest &write_request,
+ os_offset_t offset,
+ const byte *writeptr,
+ ulint n_bytes,
+ bool try_punch_only= false)
+{
+ if (dberr_t err= os_file_punch_hole(iter.file, offset, n_bytes))
+ return err;
+
+ if (try_punch_only)
+ return DB_SUCCESS;
+
+ for (ulint j= 0; j < n_bytes; j+= srv_page_size)
+ {
+ /* Read the original data length from the block; it is
+ safer to read FIL_PAGE_COMPRESSED_SIZE because it
+ is not encrypted. */
+ ulint n_write_bytes= srv_page_size;
+ if (j || offset)
+ {
+ n_write_bytes= mach_read_from_2(writeptr + j + FIL_PAGE_DATA);
+ const unsigned ptype= mach_read_from_2(writeptr + j + FIL_PAGE_TYPE);
+ /* Ignore the empty page */
+ if (ptype == 0 && n_write_bytes == 0)
+ continue;
+ if (full_crc32)
+ n_write_bytes= buf_page_full_crc32_size(writeptr + j,
+ nullptr, nullptr);
+ else
+ {
+ n_write_bytes+= ptype == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED
+ ? FIL_PAGE_DATA + FIL_PAGE_ENCRYPT_COMP_METADATA_LEN
+ : FIL_PAGE_DATA + FIL_PAGE_COMP_METADATA_LEN;
+ }
+ }
+
+ if (dberr_t err= os_file_write(write_request, iter.filepath, iter.file,
+ writeptr + j, offset + j, n_write_bytes))
+ return err;
+ }
+
+ return DB_SUCCESS;
+}
+
/********************************************************************//**
TODO: This can be made parallel trivially by chunking up the file and
creating a callback per thread. Main benefit will be to use multiple CPUs for
@@ -3396,7 +3457,10 @@ fil_iterate(
 /* TODO: For ROW_FORMAT=COMPRESSED tables we do a lot of useless
 copying for non-index pages. Unfortunately, it is
 required by buf_zip_decompress() */
- dberr_t err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
+ bool page_compressed = false;
+ bool punch_hole = true;
+ const IORequest write_request(IORequest::WRITE);
 for (offset = iter.start; offset < iter.end; offset += n_bytes) {
 if (callback.is_interrupted()) {
@@ -3474,7 +3538,7 @@ page_corrupted:
 src + FIL_PAGE_SPACE_ID);
 }
- const bool page_compressed =
+ page_compressed =
 (full_crc32
 && fil_space_t::is_compressed(
 callback.get_space_flags())
@@ -3667,13 +3731,23 @@ not_encrypted:
 }
 }
- /* A page was updated in the set, write back to disk. */
- if (updated) {
- IORequest write_request(IORequest::WRITE);
+ if (page_compressed && punch_hole) {
+ err = fil_import_compress_fwrite(
+ iter, full_crc32, write_request, offset,
+ writeptr, n_bytes, !updated);
- err = os_file_write(write_request,
- iter.filepath, iter.file,
- writeptr, offset, n_bytes);
+ if (err != DB_SUCCESS) {
+ punch_hole = false;
+ if (updated) {
+ goto normal_write;
+ }
+ }
+ } else if (updated) {
+ /* A page was updated in the set, write back to disk.
*/ +normal_write: + err = os_file_write( + write_request, iter.filepath, iter.file, + writeptr, offset, n_bytes); if (err != DB_SUCCESS) { goto func_exit; diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 8f3024c2d48..3d3407e3a88 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2016, 2020, MariaDB Corporation. +Copyright (c) 2016, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -883,7 +883,7 @@ row_ins_foreign_fill_virtual( rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; rec_offs_init(offsets_); const rec_offs* offsets = - rec_get_offsets(rec, index, offsets_, true, + rec_get_offsets(rec, index, offsets_, index->n_core_fields, ULINT_UNDEFINED, &cascade->heap); TABLE* mysql_table= NULL; upd_t* update = cascade->update; @@ -894,7 +894,7 @@ row_ins_foreign_fill_virtual( update->old_vrow = row_build( ROW_COPY_DATA, index, rec, offsets, index->table, NULL, NULL, - &ext, cascade->heap); + &ext, update->heap); n_diff = update->n_fields; if (index->table->vc_templ == NULL) { @@ -1197,7 +1197,8 @@ row_ins_foreign_check_on_constraint( if (table->fts) { doc_id = fts_get_doc_id_from_rec( clust_rec, clust_index, - rec_get_offsets(clust_rec, clust_index, NULL, true, + rec_get_offsets(clust_rec, clust_index, NULL, + clust_index->n_core_fields, ULINT_UNDEFINED, &tmp_heap)); } @@ -1639,7 +1640,8 @@ row_ins_check_foreign_constraint( continue; } - offsets = rec_get_offsets(rec, check_index, offsets, true, + offsets = rec_get_offsets(rec, check_index, offsets, + check_index->n_core_fields, ULINT_UNDEFINED, &heap); if (page_rec_is_supremum(rec)) { @@ -2127,7 +2129,8 @@ row_ins_scan_sec_index_for_duplicate( continue; } - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, + index->n_core_fields, ULINT_UNDEFINED, &offsets_heap); if (flags & BTR_NO_LOCKING_FLAG) { @@ -2264,7 +2267,8 @@ row_ins_duplicate_error_in_clust_online( ut_ad(!cursor->index->is_instant()); if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) { - *offsets = rec_get_offsets(rec, cursor->index, *offsets, true, + *offsets = rec_get_offsets(rec, cursor->index, *offsets, + cursor->index->n_fields, ULINT_UNDEFINED, heap); err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets); if (err != DB_SUCCESS) { @@ -2275,7 +2279,8 @@ row_ins_duplicate_error_in_clust_online( rec = page_rec_get_next_const(btr_cur_get_rec(cursor)); if (cursor->up_match >= n_uniq && !page_rec_is_supremum(rec)) { - *offsets = rec_get_offsets(rec, cursor->index, *offsets, true, + *offsets = rec_get_offsets(rec, cursor->index, *offsets, + cursor->index->n_fields, ULINT_UNDEFINED, heap); err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets); } @@ -2331,7 +2336,7 @@ row_ins_duplicate_error_in_clust( if (!page_rec_is_infimum(rec)) { offsets = rec_get_offsets(rec, cursor->index, offsets, - true, + cursor->index->n_core_fields, ULINT_UNDEFINED, &heap); /* We set a lock on the possible duplicate: this @@ -2374,6 +2379,18 @@ row_ins_duplicate_error_in_clust( duplicate: trx->error_info = cursor->index; err = DB_DUPLICATE_KEY; + if (cursor->index->table->versioned() + && entry->vers_history_row()) + { + ulint trx_id_len; + byte *trx_id = 
rec_get_nth_field(
+ rec, offsets, n_unique,
+ &trx_id_len);
+ ut_ad(trx_id_len == DATA_TRX_ID_LEN);
+ if (trx->id == trx_read_trx_id(trx_id)) {
+ err = DB_FOREIGN_DUPLICATE_KEY;
+ }
+ }
 goto func_exit;
 }
 }
@@ -2385,7 +2402,7 @@ duplicate:
 if (!page_rec_is_supremum(rec)) {
 offsets = rec_get_offsets(rec, cursor->index, offsets,
- true,
+ cursor->index->n_core_fields,
 ULINT_UNDEFINED, &heap);
 if (trx->duplicates) {
@@ -2502,7 +2519,7 @@ row_ins_index_entry_big_rec(
 btr_pcur_open(index, entry, PAGE_CUR_LE, BTR_MODIFY_TREE, &pcur, &mtr);
 rec = btr_pcur_get_rec(&pcur);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
 ULINT_UNDEFINED, heap);
 DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern");
@@ -3058,7 +3075,8 @@ row_ins_sec_index_entry_low(
 prefix, we must convert the insert
 into a modify of an existing record */
 offsets = rec_get_offsets(
- btr_cur_get_rec(&cursor), index, offsets, true,
+ btr_cur_get_rec(&cursor), index, offsets,
+ index->n_core_fields,
 ULINT_UNDEFINED, &offsets_heap);
 err = row_ins_sec_index_entry_by_modify(
@@ -3314,7 +3332,8 @@ row_ins_index_entry(
 dtuple_t* entry, /*!< in/out: index entry to insert */
 que_thr_t* thr) /*!< in: query thread */
 {
- ut_ad(thr_get_trx(thr)->id || index->table->no_rollback());
+ ut_ad(thr_get_trx(thr)->id || index->table->no_rollback()
+ || index->table->is_temporary());
 DBUG_EXECUTE_IF("row_ins_index_entry_timeout", {
 DBUG_SET("-d,row_ins_index_entry_timeout");
@@ -3588,6 +3607,16 @@ row_ins_get_row_from_select(
 }
 }
+inline
+bool ins_node_t::vers_history_row() const
+{
+ if (!table->versioned())
+ return false;
+ dfield_t* row_end = dtuple_get_nth_field(row, table->vers_end);
+ return row_end->vers_history_row();
+}
+
+
/***********************************************************//**
Inserts a row to a table.
@return DB_SUCCESS if operation successfully completed, else error
@@ -3626,12 +3655,31 @@ row_ins(
 ut_ad(node->state == INS_NODE_INSERT_ENTRIES);
 while (node->index != NULL) {
- if (node->index->type != DICT_FTS) {
+ dict_index_t *index = node->index;
+ /*
+ We do not insert history rows into FTS_DOC_ID_INDEX because
+ it is unique by FTS_DOC_ID only and we do not want to add
+ row_end to the unique key. Fulltext works in such a way that
+ a new FTS_DOC_ID is created on every fulltext UPDATE, so
+ keeping only the FTS_DOC_ID for history is enough.
+ */
+ const unsigned type = index->type;
+ if (index->type & DICT_FTS) {
+ } else if (!(type & DICT_UNIQUE) || index->n_uniq > 1
+ || !node->vers_history_row()) {
+
 dberr_t err = row_ins_index_entry_step(node, thr);
 if (err != DB_SUCCESS) {
 DBUG_RETURN(err);
 }
+ } else {
+ /* Unique indexes with system versioning must contain
+ the version end column. The only exception is a hidden
+ FTS_DOC_ID_INDEX that InnoDB may create on a hidden or
+ user-created FTS_DOC_ID column.
*/ + ut_ad(!strcmp(index->name, FTS_DOC_ID_INDEX_NAME)); + ut_ad(!strcmp(index->fields[0].name, FTS_DOC_ID_COL_NAME)); } node->index = dict_table_get_next_index(node->index); @@ -3713,13 +3761,17 @@ row_ins_step( } if (UNIV_LIKELY(!node->table->skip_alter_undo)) { - trx_write_trx_id(&node->sys_buf[DATA_ROW_ID_LEN], trx->id); + trx_write_trx_id(&node->sys_buf[DATA_TRX_ID_LEN], trx->id); } if (node->state == INS_NODE_SET_IX_LOCK) { node->state = INS_NODE_ALLOC_ROW_ID; + if (node->table->is_temporary()) { + node->trx_id = trx->id; + } + /* It may be that the current session has not yet started its transaction, or it has been committed: */ diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc index 45ec027beb5..c0396c33cc4 100644 --- a/storage/innobase/row/row0log.cc +++ b/storage/innobase/row/row0log.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -353,7 +353,7 @@ row_log_online_op( row_merge_buf_encode(), because here we do not encode extra_size+1 (and reserve 0 as the end-of-chunk marker). */ - size = rec_get_converted_size_temp( + size = rec_get_converted_size_temp<false>( index, tuple->fields, tuple->n_fields, &extra_size); ut_ad(size >= extra_size); ut_ad(size <= sizeof log->tail.buf); @@ -401,7 +401,7 @@ row_log_online_op( *b++ = (byte) extra_size; } - rec_convert_dtuple_to_temp( + rec_convert_dtuple_to_temp<false>( b + extra_size, index, tuple->fields, tuple->n_fields); b += size; @@ -743,7 +743,7 @@ row_log_table_delete( old_pk, old_pk->n_fields - 2)->len); ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field( old_pk, old_pk->n_fields - 1)->len); - old_pk_size = rec_get_converted_size_temp( + old_pk_size = rec_get_converted_size_temp<false>( new_index, old_pk->fields, old_pk->n_fields, &old_pk_extra_size); ut_ad(old_pk_extra_size < 0x100); @@ -756,7 +756,7 @@ row_log_table_delete( *b++ = ROW_T_DELETE; *b++ = static_cast<byte>(old_pk_extra_size); - rec_convert_dtuple_to_temp( + rec_convert_dtuple_to_temp<false>( b + old_pk_extra_size, new_index, old_pk->fields, old_pk->n_fields); @@ -856,7 +856,7 @@ row_log_table_low_redundant( rec_comp_status_t status = is_instant ? 
REC_STATUS_INSTANT : REC_STATUS_ORDINARY; - size = rec_get_converted_size_temp( + size = rec_get_converted_size_temp<true>( index, tuple->fields, tuple->n_fields, &extra_size, status); if (is_instant) { size++; @@ -876,7 +876,7 @@ row_log_table_low_redundant( ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field( old_pk, old_pk->n_fields - 1)->len); - old_pk_size = rec_get_converted_size_temp( + old_pk_size = rec_get_converted_size_temp<false>( new_index, old_pk->fields, old_pk->n_fields, &old_pk_extra_size); ut_ad(old_pk_extra_size < 0x100); @@ -893,7 +893,7 @@ row_log_table_low_redundant( if (old_pk_size) { *b++ = static_cast<byte>(old_pk_extra_size); - rec_convert_dtuple_to_temp( + rec_convert_dtuple_to_temp<false>( b + old_pk_extra_size, new_index, old_pk->fields, old_pk->n_fields); b += old_pk_size; @@ -916,7 +916,7 @@ row_log_table_low_redundant( *b = status; } - rec_convert_dtuple_to_temp( + rec_convert_dtuple_to_temp<true>( b + extra_size, index, tuple->fields, tuple->n_fields, status); b += size; @@ -1038,7 +1038,7 @@ row_log_table_low( ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field( old_pk, old_pk->n_fields - 1)->len); - old_pk_size = rec_get_converted_size_temp( + old_pk_size = rec_get_converted_size_temp<false>( new_index, old_pk->fields, old_pk->n_fields, &old_pk_extra_size); ut_ad(old_pk_extra_size < 0x100); @@ -1054,7 +1054,7 @@ row_log_table_low( if (old_pk_size) { *b++ = static_cast<byte>(old_pk_extra_size); - rec_convert_dtuple_to_temp( + rec_convert_dtuple_to_temp<false>( b + old_pk_extra_size, new_index, old_pk->fields, old_pk->n_fields); b += old_pk_size; @@ -1259,7 +1259,8 @@ row_log_table_get_pk( if (!offsets) { offsets = rec_get_offsets( - rec, index, NULL, true, + rec, index, nullptr, + index->n_core_fields, index->db_trx_id() + 1, heap); } @@ -1309,7 +1310,8 @@ row_log_table_get_pk( } if (!offsets) { - offsets = rec_get_offsets(rec, index, NULL, true, + offsets = rec_get_offsets(rec, index, nullptr, + index->n_core_fields, ULINT_UNDEFINED, heap); } @@ -1986,7 +1988,8 @@ all_done: return(DB_SUCCESS); } - offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, NULL, true, + offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, nullptr, + index->n_core_fields, ULINT_UNDEFINED, &offsets_heap); #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(btr_pcur_get_rec(&pcur), offsets)); @@ -2184,7 +2187,7 @@ func_exit_committed: /* Prepare to update (or delete) the record. */ rec_offs* cur_offsets = rec_get_offsets( - btr_pcur_get_rec(&pcur), index, NULL, true, + btr_pcur_get_rec(&pcur), index, nullptr, index->n_core_fields, ULINT_UNDEFINED, &offsets_heap); if (!log->same_pk) { @@ -4045,3 +4048,9 @@ row_log_apply( DBUG_RETURN(error); } + +unsigned row_log_get_n_core_fields(const dict_index_t *index) +{ + ut_ad(index->online_log); + return index->online_log->n_core_fields; +} diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 3939d48ea9a..475424b2155 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2005, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation. 
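In the hunks above, rec_get_converted_size_temp() and rec_convert_dtuple_to_temp() have grown a redundant_temp template parameter (selecting row_log_get_n_core_fields(index) instead of index->n_core_fields for ROW_FORMAT=REDUNDANT online log records), and both specializations are instantiated explicitly because the template bodies live in rem0rec.cc. A self-contained sketch of that explicit-instantiation pattern; encode_temp_size() is an illustrative stand-in, not the real function:

/* encode_temp_size() is an illustrative stand-in for
rec_get_converted_size_temp<redundant_temp>(). */
template<bool redundant_temp>
ulint encode_temp_size(ulint data_size)
{
  /* The real code forwards to
  rec_get_converted_size_comp_prefix_low<false,redundant_temp>(),
  which picks row_log_get_n_core_fields() when redundant_temp
  holds; here only a size difference is modelled. */
  return data_size + (redundant_temp ? 2 : 1);
}

/* Explicit instantiations: without these, callers in other
translation units (row0log.cc, row0merge.cc) would fail to link,
because the template is defined in a .cc file. */
template ulint encode_temp_size<false>(ulint);
template ulint encode_temp_size<true>(ulint);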
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -308,7 +308,7 @@ row_merge_buf_encode( ulint size; ulint extra_size; - size = rec_get_converted_size_temp( + size = rec_get_converted_size_temp<false>( index, entry->fields, n_fields, &extra_size); ut_ad(size >= extra_size); @@ -321,7 +321,7 @@ row_merge_buf_encode( *(*b)++ = (byte) (extra_size + 1); } - rec_convert_dtuple_to_temp(*b + extra_size, index, + rec_convert_dtuple_to_temp<false>(*b + extra_size, index, entry->fields, n_fields); *b += size; @@ -796,7 +796,7 @@ row_merge_buf_add( ulint size; ulint extra; - size = rec_get_converted_size_temp( + size = rec_get_converted_size_temp<false>( index, entry->fields, n_fields, &extra); ut_ad(data_size + extra_size == size); @@ -2037,7 +2037,8 @@ end_of_index: rec = page_cur_get_rec(cur); if (online) { - offsets = rec_get_offsets(rec, clust_index, NULL, true, + offsets = rec_get_offsets(rec, clust_index, NULL, + clust_index->n_core_fields, ULINT_UNDEFINED, &row_heap); rec_trx_id = row_get_rec_trx_id(rec, clust_index, offsets); @@ -2129,7 +2130,8 @@ end_of_index: duplicate keys. */ continue; } else { - offsets = rec_get_offsets(rec, clust_index, NULL, true, + offsets = rec_get_offsets(rec, clust_index, NULL, + clust_index->n_core_fields, ULINT_UNDEFINED, &row_heap); /* This is a locking ALTER TABLE. @@ -3824,17 +3826,20 @@ row_merge_drop_indexes_dict( trx->op_info = ""; } -/*********************************************************************//** -Drop indexes that were created before an error occurred. +/** Drop indexes that were created before an error occurred. The data dictionary must have been locked exclusively by the caller, -because the transaction will not be committed. */ +because the transaction will not be committed. +@param trx dictionary transaction +@param table table containing the indexes +@param locked True if table is locked, + false - may need to do lazy drop +@param alter_trx Alter table transaction */ void row_merge_drop_indexes( -/*===================*/ - trx_t* trx, /*!< in/out: dictionary transaction */ - dict_table_t* table, /*!< in/out: table containing the indexes */ - ibool locked) /*!< in: TRUE=table locked, - FALSE=may need to do a lazy drop */ + trx_t* trx, + dict_table_t* table, + bool locked, + const trx_t* alter_trx) { dict_index_t* index; dict_index_t* next_index; @@ -3859,7 +3864,7 @@ row_merge_drop_indexes( A concurrent purge will be prevented by dict_sys.latch. */ if (!locked && (table->get_ref_count() > 1 - || UT_LIST_GET_FIRST(table->locks))) { + || table->has_lock_other_than(alter_trx))) { /* We will have to drop the indexes later, when the table is guaranteed to be no longer in use. 
Mark the indexes as incomplete and corrupted, so that other @@ -3895,6 +3900,8 @@ row_merge_drop_indexes( ut_ad(prev); ut_a(table->fts); fts_drop_index(table, index, trx); + row_merge_drop_index_dict( + trx, index->id); /* We can remove a DICT_FTS index from the cache, because we do not allow ADD FULLTEXT INDEX @@ -4407,6 +4414,7 @@ row_merge_create_index( dict_index_t* index; ulint n_fields = index_def->n_fields; ulint i; + ulint n_add_vcol = 0; DBUG_ENTER("row_merge_create_index"); @@ -4431,7 +4439,7 @@ row_merge_create_index( ut_ad(ifield->col_no >= table->n_v_def); name = add_v->v_col_name[ ifield->col_no - table->n_v_def]; - index->has_new_v_col = true; + n_add_vcol++; } else { name = dict_table_get_v_col_name( table, ifield->col_no); @@ -4443,6 +4451,10 @@ row_merge_create_index( dict_mem_index_add_field(index, name, ifield->prefix_len); } + if (n_add_vcol) { + index->assign_new_v_col(n_add_vcol); + } + DBUG_RETURN(index); } @@ -4461,7 +4473,7 @@ row_merge_is_index_usable( } return(!index->is_corrupted() - && (index->table->is_temporary() + && (index->table->is_temporary() || index->table->no_rollback() || index->trx_id == 0 || !trx->read_view.is_open() || trx->read_view.changes_visible( @@ -4857,10 +4869,6 @@ wait_again: buf, i + 1, n_indexes); } - DBUG_EXECUTE_IF( - "ib_merge_wait_after_sort", - os_thread_sleep(20000000);); /* 20 sec */ - if (error == DB_SUCCESS) { BtrBulk btr_bulk(sort_idx, trx, trx->get_flush_observer()); diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index ea0719a3fc8..a6c75f7f450 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2000, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -689,6 +689,7 @@ row_mysql_handle_errors( dberr_t err; DBUG_ENTER("row_mysql_handle_errors"); + DEBUG_SYNC_C("row_mysql_handle_errors"); handle_new_error: err = trx->error_state; @@ -964,9 +965,6 @@ row_create_prebuilt( prebuilt->fts_doc_id_in_read_set = 0; prebuilt->blob_heap = NULL; - prebuilt->m_no_prefetch = false; - prebuilt->m_read_virtual_key = false; - DBUG_RETURN(prebuilt); } @@ -1804,12 +1802,11 @@ row_update_for_mysql(row_prebuilt_t* prebuilt) clust_index = dict_table_get_first_index(table); - if (prebuilt->pcur->btr_cur.index == clust_index) { - btr_pcur_copy_stored_position(node->pcur, prebuilt->pcur); - } else { - btr_pcur_copy_stored_position(node->pcur, - prebuilt->clust_pcur); - } + btr_pcur_copy_stored_position(node->pcur, + prebuilt->pcur->btr_cur.index + == clust_index + ? 
prebuilt->pcur : prebuilt->clust_pcur);
 ut_a(node->pcur->rel_pos == BTR_PCUR_ON);
@@ -2027,7 +2024,8 @@ row_unlock_for_mysql(
 rec_offs* offsets = offsets_;
 rec_offs_init(offsets_);
- offsets = rec_get_offsets(rec, index, offsets, true,
+ offsets = rec_get_offsets(rec, index, offsets,
+ index->n_core_fields,
 ULINT_UNDEFINED, &heap);
 rec_trx_id = row_get_rec_trx_id(rec, index, offsets);
@@ -2105,10 +2103,18 @@ row_mysql_unfreeze_data_dictionary(
@param buf Buffer to hold start time data */
void thd_get_query_start_data(THD *thd, char *buf);
-/** Function restores btr_pcur_t, creates dtuple_t from rec_t,
-sets row_end = CURRENT_TIMESTAMP/trx->id, inserts it to a table and updates
-table statistics.
-This is used in UPDATE CASCADE/SET NULL of a system versioning table.
+/** Insert history row when evaluating foreign key referential action.
+
+1. Create new dtuple_t 'row' from node->historical_row;
+2. Update its row_end to current timestamp;
+3. Insert it into the table;
+4. Update table statistics.
+
+This is used in UPDATE CASCADE/SET NULL of a system versioned referenced table.
+
+node->historical_row: dtuple_t containing pointers to the row changed by the
+referential action.
+
 @param[in] thr current query thread
 @param[in] node a node which just updated a row in a foreign table
 @return DB_SUCCESS or some error */
@@ -2118,11 +2124,19 @@ static dberr_t row_update_vers_insert(que_thr_t* thr, upd_node_t* node)
 dfield_t* row_end;
 char row_end_data[8];
 dict_table_t* table = node->table;
+ const unsigned zip_size = table->space->zip_size();
 ut_ad(table->versioned());
- dtuple_t* row = node->historical_row;
- ut_ad(row);
- node->historical_row = NULL;
+ dtuple_t* row;
+ const ulint n_cols = dict_table_get_n_cols(table);
+ const ulint n_v_cols = dict_table_get_n_v_cols(table);
+
+ ut_ad(n_cols == dtuple_get_n_fields(node->historical_row));
+ ut_ad(n_v_cols == dtuple_get_n_v_fields(node->historical_row));
+
+ row = dtuple_create_with_vcol(node->historical_heap, n_cols, n_v_cols);
+
+ dict_table_copy_types(row, table);
 ins_node_t* insert_node =
 ins_node_create(INS_DIRECT, table, node->historical_heap);
@@ -2135,6 +2149,40 @@ static dberr_t row_update_vers_insert(que_thr_t* thr, upd_node_t* node)
 insert_node->common.parent = thr;
 ins_node_set_new_row(insert_node, row);
+ ut_ad(n_cols > DATA_N_SYS_COLS);
+ // Exclude DB_ROW_ID, DB_TRX_ID, DB_ROLL_PTR
+ for (ulint i = 0; i < n_cols - DATA_N_SYS_COLS; i++) {
+ dfield_t *src= dtuple_get_nth_field(node->historical_row, i);
+ dfield_t *dst= dtuple_get_nth_field(row, i);
+ dfield_copy(dst, src);
+ if (dfield_is_ext(src)) {
+ byte *field_data
+ = static_cast<byte*>(dfield_get_data(src));
+ ulint ext_len;
+ ulint field_len = dfield_get_len(src);
+
+ ut_a(field_len >= BTR_EXTERN_FIELD_REF_SIZE);
+
+ ut_a(memcmp(field_data + field_len
+ - BTR_EXTERN_FIELD_REF_SIZE,
+ field_ref_zero,
+ BTR_EXTERN_FIELD_REF_SIZE));
+
+ byte *data = btr_copy_externally_stored_field(
+ &ext_len, field_data, zip_size, field_len,
+ node->historical_heap);
+ dfield_set_data(dst, data, ext_len);
+ }
+ }
+
+ for (ulint i = 0; i < n_v_cols; i++) {
+ dfield_t *dst= dtuple_get_nth_v_field(row, i);
+ dfield_t *src= dtuple_get_nth_v_field(node->historical_row, i);
+ dfield_copy(dst, src);
+ }
+
+ node->historical_row = NULL;
+
 row_end = dtuple_get_nth_field(row, table->vers_end);
 if (dict_table_get_nth_col(table, table->vers_end)->vers_native()) {
 mach_write_to_8(row_end_data, trx->id);
@@ -4292,6 +4340,8 @@ row_rename_table_for_mysql(
 "END;\n"
 , FALSE, trx);
+ ut_ad(err != DB_DUPLICATE_KEY);
+
 /*
SYS_TABLESPACES and SYS_DATAFILES need to be updated if the table is in a single-table tablespace. */ if (err != DB_SUCCESS || !dict_table_is_file_per_table(table)) { @@ -4739,7 +4789,7 @@ func_exit: rec = buf + mach_read_from_4(buf); - offsets = rec_get_offsets(rec, index, offsets_, true, + offsets = rec_get_offsets(rec, index, offsets_, index->n_core_fields, ULINT_UNDEFINED, &heap); if (prev_entry != NULL) { diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index e9eaf27977d..cbf4ddce279 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -125,8 +125,9 @@ row_purge_remove_clust_if_poss_low( rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; rec_offs_init(offsets_); mem_heap_t* heap = NULL; - rec_offs* offsets = rec_get_offsets( - rec, index, offsets_, true, ULINT_UNDEFINED, &heap); + rec_offs* offsets = rec_get_offsets(rec, index, offsets_, + index->n_core_fields, + ULINT_UNDEFINED, &heap); bool success = true; if (node->roll_ptr != row_get_rec_roll_ptr(rec, index, offsets)) { @@ -732,7 +733,7 @@ row_purge_skip_uncommitted_virtual_index( not support LOCK=NONE when adding an index on newly added virtual column.*/ while (index != NULL && dict_index_has_virtual(index) - && !index->is_committed() && index->has_new_v_col) { + && !index->is_committed() && index->has_new_v_col()) { index = dict_table_get_next_index(index); } } @@ -806,7 +807,8 @@ static void row_purge_reset_trx_id(purge_node_t* node, mtr_t* mtr) rec_offs offsets_[REC_OFFS_HEADER_SIZE + MAX_REF_PARTS + 2]; rec_offs_init(offsets_); rec_offs* offsets = rec_get_offsets( - rec, index, offsets_, true, trx_id_pos + 2, &heap); + rec, index, offsets_, index->n_core_fields, + trx_id_pos + 2, &heap); ut_ad(heap == NULL); ut_ad(dict_index_get_nth_field(index, trx_id_pos) @@ -1364,7 +1366,7 @@ purge_node_t::validate_pcur() dict_index_t* clust_index = pcur.btr_cur.index; rec_offs* offsets = rec_get_offsets( - pcur.old_rec, clust_index, NULL, true, + pcur.old_rec, clust_index, NULL, pcur.old_n_core_fields, pcur.old_n_fields, &heap); /* Here we are comparing the purge ref record and the stored initial diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc index f37b810b7eb..f0e5385be85 100644 --- a/storage/innobase/row/row0row.cc +++ b/storage/innobase/row/row0row.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2018, 2020, MariaDB Corporation. +Copyright (c) 2018, 2021, MariaDB Corporation. 
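The row_update_vers_insert() hunk above now deep-copies node->historical_row instead of inserting it directly: ordinary fields are copied by pointer into a freshly created tuple, while externally stored (off-page) columns are first materialized into the history heap, so the inserted history row no longer depends on the old record's BLOB pages. A condensed sketch of that per-field copy; copy_history_field() is a hypothetical name for logic that is inline in the real function:

/* copy_history_field() is a hypothetical helper; the committed code
performs this copy inline in row_update_vers_insert(). */
static void copy_history_field(dfield_t* dst, dfield_t* src,
                               ulint zip_size, mem_heap_t* heap)
{
  dfield_copy(dst, src); /* shallow copy of the data pointer */
  if (dfield_is_ext(src)) {
    /* The value is stored off-page: fetch the whole column
    into the history heap. */
    ulint ext_len;
    byte* field_data = static_cast<byte*>(dfield_get_data(src));
    byte* data = btr_copy_externally_stored_field(
      &ext_len, field_data, zip_size,
      dfield_get_len(src), heap);
    dfield_set_data(dst, data, ext_len);
  }
}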
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -295,12 +295,14 @@ row_build_index_entry_low( continue; } + ut_ad(!(index->type & DICT_FTS)); + ulint len = dfield_get_len(dfield); if (f.prefix_len == 0 && (!dfield_is_ext(dfield) || dict_index_is_clust(index))) { - /* The dfield_copy() above suffices for + /* The *dfield = *dfield2 above suffices for columns that are stored in-page, or for clustered index record columns that are not part of a column prefix in the PRIMARY KEY. */ @@ -439,7 +441,8 @@ row_build_low( ut_ad(!col_map || col_table); if (!offsets) { - offsets = rec_get_offsets(rec, index, offsets_, true, + offsets = rec_get_offsets(rec, index, offsets_, + index->n_core_fields, ULINT_UNDEFINED, &tmp_heap); } else { ut_ad(rec_offs_validate(rec, index, offsets)); @@ -1003,7 +1006,7 @@ row_build_row_ref( ut_ad(heap != NULL); ut_ad(!dict_index_is_clust(index)); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, ULINT_UNDEFINED, &tmp_heap); /* Secondary indexes must not contain externally stored columns. */ ut_ad(!rec_offs_any_extern(offsets)); @@ -1112,7 +1115,8 @@ row_build_row_ref_in_tuple( ut_ad(clust_index); if (!offsets) { - offsets = rec_get_offsets(rec, index, offsets_, true, + offsets = rec_get_offsets(rec, index, offsets_, + index->n_core_fields, ULINT_UNDEFINED, &heap); } else { ut_ad(rec_offs_validate(rec, index, offsets)); diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 8b03ef85ec9..74947b78774 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -2,7 +2,7 @@ Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -81,9 +81,9 @@ is alphabetically the same as the corresponding BLOB column in the clustered index record. NOTE: the comparison is NOT done as a binary comparison, but character fields are compared with collation! -@return TRUE if the columns are equal */ +@return whether the columns are equal */ static -ibool +bool row_sel_sec_rec_is_for_blob( /*========================*/ ulint mtype, /*!< in: main type */ @@ -102,19 +102,18 @@ row_sel_sec_rec_is_for_blob( const byte* sec_field, /*!< in: column in secondary index */ ulint sec_len, /*!< in: length of sec_field */ ulint prefix_len, /*!< in: index column prefix length - in bytes */ + in bytes, or 0 for full column */ dict_table_t* table) /*!< in: table */ { ulint len; - byte buf[REC_VERSION_56_MAX_INDEX_COL_LEN]; + byte buf[REC_VERSION_56_MAX_INDEX_COL_LEN + 1]; /* This function should never be invoked on tables in ROW_FORMAT=REDUNDANT or ROW_FORMAT=COMPACT, because they should always contain enough prefix in the clustered index record. 
*/ ut_ad(dict_table_has_atomic_blobs(table)); ut_a(clust_len >= BTR_EXTERN_FIELD_REF_SIZE); - ut_ad(prefix_len >= sec_len); - ut_ad(prefix_len > 0); + ut_ad(!prefix_len || prefix_len >= sec_len); ut_a(prefix_len <= sizeof buf); if (!memcmp(clust_field + clust_len - BTR_EXTERN_FIELD_REF_SIZE, @@ -123,11 +122,12 @@ row_sel_sec_rec_is_for_blob( This record should only be seen by recv_recovery_rollback_active() or any TRX_ISO_READ_UNCOMMITTED transactions. */ - return(FALSE); + return false; } len = btr_copy_externally_stored_field_prefix( - buf, prefix_len, table->space->zip_size(), + buf, prefix_len ? prefix_len : sizeof buf, + table->space->zip_size(), clust_field, clust_len); if (len == 0) { @@ -136,11 +136,18 @@ row_sel_sec_rec_is_for_blob( referring to this clustered index record, because btr_free_externally_stored_field() is called after all secondary index entries of the row have been purged. */ - return(FALSE); + return false; } - len = dtype_get_at_most_n_mbchars(prtype, mbminlen, mbmaxlen, - prefix_len, len, (const char*) buf); + if (prefix_len) { + len = dtype_get_at_most_n_mbchars(prtype, mbminlen, mbmaxlen, + prefix_len, len, + reinterpret_cast<const char*> + (buf)); + } else if (len >= sizeof buf) { + ut_ad("too long column" == 0); + return false; + } return(!cmp_data_data(mtype, prtype, buf, len, sec_field, sec_len)); } @@ -203,9 +210,11 @@ row_sel_sec_rec_is_for_clust_rec( ib_vcol_row vc(heap); clust_offs = rec_get_offsets(clust_rec, clust_index, clust_offs, - true, ULINT_UNDEFINED, &heap); + clust_index->n_core_fields, + ULINT_UNDEFINED, &heap); sec_offs = rec_get_offsets(sec_rec, sec_index, sec_offs, - true, ULINT_UNDEFINED, &heap); + sec_index->n_fields, + ULINT_UNDEFINED, &heap); n = dict_index_get_n_ordering_defined_by_user(sec_index); @@ -215,12 +224,13 @@ row_sel_sec_rec_is_for_clust_rec( ulint clust_pos = 0; ulint clust_len = 0; ulint len; - bool is_virtual; ifield = dict_index_get_nth_field(sec_index, i); col = dict_field_get_col(ifield); - is_virtual = col->is_virtual(); + sec_field = rec_get_nth_field(sec_rec, sec_offs, i, &sec_len); + + const bool is_virtual = col->is_virtual(); /* For virtual column, its value will need to be reconstructed from base column in cluster index */ @@ -252,43 +262,55 @@ row_sel_sec_rec_is_for_clust_rec( innobase_report_computed_value_failed(row); return DB_COMPUTE_VALUE_FAILED; } - clust_len = vfield->len; + len = clust_len = vfield->len; clust_field = static_cast<byte*>(vfield->data); } else { clust_pos = dict_col_get_clust_pos(col, clust_index); + clust_field = rec_get_nth_cfield( clust_rec, clust_index, clust_offs, clust_pos, &clust_len); - } - - sec_field = rec_get_nth_field(sec_rec, sec_offs, i, &sec_len); - - len = clust_len; - - if (ifield->prefix_len > 0 && len != UNIV_SQL_NULL - && sec_len != UNIV_SQL_NULL && !is_virtual) { + if (clust_len == UNIV_SQL_NULL) { + if (sec_len == UNIV_SQL_NULL) { + continue; + } + return DB_SUCCESS; + } + if (sec_len == UNIV_SQL_NULL) { + return DB_SUCCESS; + } + len = clust_len; if (rec_offs_nth_extern(clust_offs, clust_pos)) { len -= BTR_EXTERN_FIELD_REF_SIZE; } - len = dtype_get_at_most_n_mbchars( - col->prtype, col->mbminlen, col->mbmaxlen, - ifield->prefix_len, len, (char*) clust_field); - - if (rec_offs_nth_extern(clust_offs, clust_pos) - && len < sec_len) { - if (!row_sel_sec_rec_is_for_blob( - col->mtype, col->prtype, - col->mbminlen, col->mbmaxlen, - clust_field, clust_len, - sec_field, sec_len, - ifield->prefix_len, - clust_index->table)) { - return DB_SUCCESS; + if (ulint prefix_len = 
ifield->prefix_len) { + len = dtype_get_at_most_n_mbchars( + col->prtype, col->mbminlen, + col->mbmaxlen, prefix_len, len, + reinterpret_cast<const char*>( + clust_field)); + if (len < sec_len) { + goto check_for_blob; } + } else { +check_for_blob: + if (rec_offs_nth_extern(clust_offs, + clust_pos)) { + if (!row_sel_sec_rec_is_for_blob( + col->mtype, col->prtype, + col->mbminlen, + col->mbmaxlen, + clust_field, clust_len, + sec_field, sec_len, + prefix_len, + clust_index->table)) { + return DB_SUCCESS; + } - continue; + continue; + } } } @@ -908,7 +930,9 @@ row_sel_get_clust_rec( offsets = rec_get_offsets(rec, btr_pcur_get_btr_cur(&plan->pcur)->index, - offsets, true, ULINT_UNDEFINED, &heap); + offsets, + btr_pcur_get_btr_cur(&plan->pcur)->index + ->n_core_fields, ULINT_UNDEFINED, &heap); row_build_row_ref_fast(plan->clust_ref, plan->clust_map, rec, offsets); @@ -943,7 +967,8 @@ row_sel_get_clust_rec( goto err_exit; } - offsets = rec_get_offsets(clust_rec, index, offsets, true, + offsets = rec_get_offsets(clust_rec, index, offsets, + index->n_core_fields, ULINT_UNDEFINED, &heap); if (!node->read_view) { @@ -1163,7 +1188,8 @@ re_scan: rec = btr_pcur_get_rec(pcur); my_offsets = offsets_; - my_offsets = rec_get_offsets(rec, index, my_offsets, true, + my_offsets = rec_get_offsets(rec, index, my_offsets, + index->n_fields, ULINT_UNDEFINED, &heap); /* No match record */ @@ -1186,7 +1212,7 @@ re_scan: rtr_rec_t* rtr_rec = &(*it); my_offsets = rec_get_offsets( - rtr_rec->r_rec, index, my_offsets, true, + rtr_rec->r_rec, index, my_offsets, index->n_fields, ULINT_UNDEFINED, &heap); err = lock_sec_rec_read_check_and_lock( @@ -1495,7 +1521,7 @@ exhausted: rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; rec_offs* offsets = offsets_; rec_offs_init(offsets_); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, ULINT_UNDEFINED, &heap); if (dict_index_is_clust(index)) { @@ -1711,7 +1737,7 @@ rec_loop: trx = thr_get_trx(thr); offsets = rec_get_offsets(next_rec, index, offsets, - true, + index->n_core_fields, ULINT_UNDEFINED, &heap); /* If innodb_locks_unsafe_for_binlog option is used @@ -1776,7 +1802,8 @@ skip_lock: ulint lock_type; trx_t* trx; - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, + index->n_core_fields, ULINT_UNDEFINED, &heap); trx = thr_get_trx(thr); @@ -1863,7 +1890,7 @@ skip_lock: /* PHASE 3: Get previous version in a consistent read */ cons_read_requires_clust_rec = FALSE; - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, ULINT_UNDEFINED, &heap); if (consistent_read) { @@ -1894,7 +1921,8 @@ skip_lock: exhausted. */ offsets = rec_get_offsets( - rec, index, offsets, true, + rec, index, offsets, + index->n_core_fields, ULINT_UNDEFINED, &heap); /* Fetch the columns needed in @@ -3037,8 +3065,7 @@ static bool row_sel_store_mysql_rec( search or virtual key read is not requested. */ if (!rec_clust || !prebuilt->index->has_virtual() - || (!prebuilt->read_just_key - && !prebuilt->m_read_virtual_key)) { + || !prebuilt->read_just_key) { /* Initialize the NULL bit. 
*/ if (templ->mysql_null_bit_mask) { mysql_rec[templ->mysql_null_byte_offset] @@ -3056,23 +3083,8 @@ static bool row_sel_store_mysql_rec( const dfield_t* dfield = dtuple_get_nth_v_field( vrow, col->v_pos); - /* If this is a partitioned table, it might request - InnoDB to fill out virtual column data for serach - index key values while other non key columns are also - getting selected. The non-key virtual columns may - not be materialized and we should skip them. */ if (dfield_get_type(dfield)->mtype == DATA_MISSING) { -#ifdef UNIV_DEBUG - ulint prefix; -#endif /* UNIV_DEBUG */ - ut_ad(prebuilt->m_read_virtual_key); - - /* If it is part of index key the data should - have been materialized. */ - ut_ad(dict_index_get_nth_col_or_prefix_pos( - prebuilt->index, col->v_pos, false, - true, &prefix) == ULINT_UNDEFINED); - + ut_ad("no ha_innopart in MariaDB" == 0); continue; } @@ -3192,7 +3204,8 @@ class Row_sel_get_clust_rec_for_mysql ut_ad(rec_offs_validate(cached_clust_rec, index, offsets)); ut_ad(index->first_user_field() <= rec_offs_n_fields(offsets)); - ut_ad(vers_offs == rec_get_offsets(cached_old_vers, index, vers_offs, true, + ut_ad(vers_offs == rec_get_offsets(cached_old_vers, index, vers_offs, + index->n_core_fields, index->db_trx_id(), &heap)); ut_ad(!heap); for (auto n= index->db_trx_id(); n--; ) @@ -3379,7 +3392,8 @@ Row_sel_get_clust_rec_for_mysql::operator()( goto func_exit; } - *offsets = rec_get_offsets(clust_rec, clust_index, *offsets, true, + *offsets = rec_get_offsets(clust_rec, clust_index, *offsets, + clust_index->n_core_fields, ULINT_UNDEFINED, offset_heap); if (prebuilt->select_lock_type != LOCK_NONE) { @@ -3453,7 +3467,8 @@ Row_sel_get_clust_rec_for_mysql::operator()( ut_d(check_eq(clust_index, *offsets)); *offsets = rec_get_offsets( old_vers, clust_index, *offsets, - true, ULINT_UNDEFINED, offset_heap); + clust_index->n_core_fields, + ULINT_UNDEFINED, offset_heap); } } @@ -3876,7 +3891,7 @@ exhausted: /* This is a non-locking consistent read: if necessary, fetch a previous version of the record */ - *offsets = rec_get_offsets(rec, index, *offsets, true, + *offsets = rec_get_offsets(rec, index, *offsets, index->n_core_fields, ULINT_UNDEFINED, heap); if (!lock_clust_rec_cons_read_sees(rec, index, *offsets, @@ -4042,7 +4057,7 @@ row_sel_fill_vrow( ut_ad(!index->is_instant()); ut_ad(page_rec_is_leaf(rec)); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, ULINT_UNDEFINED, &heap); *vrow = dtuple_create_with_vcol( @@ -4289,8 +4304,7 @@ row_search_mvcc( index key, if this is covered index scan or virtual key read is requested. */ bool need_vrow = dict_index_has_virtual(prebuilt->index) - && (prebuilt->read_just_key - || prebuilt->m_read_virtual_key); + && prebuilt->read_just_key; /* Reset the new record lock info if srv_locks_unsafe_for_binlog is set or session is using a READ COMMITTED isolation level. Then @@ -4693,7 +4707,7 @@ wait_table_again: const rec_t* next_rec = page_rec_get_next_const(rec); offsets = rec_get_offsets(next_rec, index, offsets, - true, + index->n_core_fields, ULINT_UNDEFINED, &heap); err = sel_set_rec_lock(pcur, next_rec, index, offsets, @@ -4777,7 +4791,8 @@ rec_loop: level we do not lock gaps. Supremum record is really a gap and therefore we do not set locks there. 
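
Two of the hunks above collapse debug-only code into a single ut_ad("..." == 0): ut_ad("too long column" == 0) in row_sel_sec_rec_is_for_blob() and ut_ad("no ha_innopart in MariaDB" == 0) here. Comparing a string literal with 0 is a constant false, so the assertion fires whenever the line is reached and the message text is embedded in the printed expression. A minimal standalone sketch of the idiom with plain assert() instead of the real ut_ad() macro:

#include <cassert>

// "..." == 0 is a constant-false pointer comparison, so the assert always
// fails when reached, and the message appears in the diagnostic output.
static int handle(int tag)
{
    switch (tag) {
    case 0:
        return 42;
    default:
        assert("unreachable tag" == 0);
        return -1;
    }
}

int main()
{
    return handle(0) == 42 ? 0 : 1;
}
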
*/ - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, + index->n_core_fields, ULINT_UNDEFINED, &heap); err = sel_set_rec_lock(pcur, rec, index, offsets, @@ -4880,7 +4895,7 @@ wrong_offs: ut_ad(fil_page_index_page_check(btr_pcur_get_page(pcur))); ut_ad(btr_page_get_index_id(btr_pcur_get_page(pcur)) == index->id); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, ULINT_UNDEFINED, &heap); if (UNIV_UNLIKELY(srv_force_recovery > 0)) { @@ -5143,7 +5158,8 @@ no_gap_lock: Do a normal locking read. */ offsets = rec_get_offsets( - rec, index, offsets, true, + rec, index, offsets, + index->n_core_fields, ULINT_UNDEFINED, &heap); goto locks_ok; case DB_DEADLOCK: @@ -5435,7 +5451,6 @@ use_covering_index: if ((match_mode == ROW_SEL_EXACT || prebuilt->n_rows_fetched >= MYSQL_FETCH_CACHE_THRESHOLD) && prebuilt->select_lock_type == LOCK_NONE - && !prebuilt->m_no_prefetch && !prebuilt->templ_contains_blob && !prebuilt->clust_index_was_generated && !prebuilt->used_in_HANDLER @@ -5517,7 +5532,7 @@ use_covering_index: /* We used 'offsets' for the clust rec, recalculate them for 'rec' */ offsets = rec_get_offsets(rec, index, offsets, - true, + index->n_core_fields, ULINT_UNDEFINED, &heap); result_rec = rec; @@ -5977,7 +5992,7 @@ row_search_autoinc_read_column( rec_offs_init(offsets_); ut_ad(page_rec_is_leaf(rec)); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, col_no + 1, &heap); if (rec_offs_nth_sql_null(offsets, col_no)) { diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc index 160de0b88a5..617fcf68c20 100644 --- a/storage/innobase/row/row0uins.cc +++ b/storage/innobase/row/row0uins.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -111,7 +111,8 @@ row_undo_ins_remove_clust_rec( rec_t* rec = btr_pcur_get_rec(&node->pcur); - ut_ad(rec_get_trx_id(rec, index) == node->trx->id); + ut_ad(rec_get_trx_id(rec, index) == node->trx->id + || node->table->is_temporary()); ut_ad(!rec_get_deleted_flag(rec, index->table->not_redundant()) || rec_is_alter_metadata(rec, index->table->not_redundant())); ut_ad(rec_is_metadata(rec, index->table->not_redundant()) @@ -120,7 +121,8 @@ row_undo_ins_remove_clust_rec( if (online && dict_index_is_online_ddl(index)) { mem_heap_t* heap = NULL; const rec_offs* offsets = rec_get_offsets( - rec, index, NULL, true, ULINT_UNDEFINED, &heap); + rec, index, NULL, index->n_core_fields, + ULINT_UNDEFINED, &heap); row_log_table_delete(rec, index, offsets, NULL); mem_heap_free(heap); } else { diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc index 3b72f173862..ae2a710d24b 100644 --- a/storage/innobase/row/row0umod.cc +++ b/storage/innobase/row/row0umod.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. 
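
Nearly every hunk in this part of the patch rewrites a rec_get_offsets() call, replacing the literal true with an explicit field count: index->n_core_fields for clustered-index records, index->n_fields for secondary and spatial indexes. A count carries more information than a leaf/non-leaf flag, presumably so that records written before an instant ALTER TABLE, which store fewer fields than the current definition, can still be decoded. A toy reduction of the flag-to-count generalization; the types below are simplified stand-ins, not the real InnoDB definitions:

#include <cstddef>
#include <vector>

struct toy_index {
    size_t n_core_fields; // fields every stored record is guaranteed to have
    size_t n_fields;      // fields in the current table definition
};

// A bool could only distinguish leaf from non-leaf; a count also tells the
// offset computation how many fields an old record may be missing after an
// instant ADD COLUMN, so the missing ones can be treated as defaults.
static std::vector<size_t> toy_get_offsets(size_t n_present)
{
    return std::vector<size_t>(n_present, 0); // placeholder offsets only
}

int main()
{
    toy_index idx{3, 5}; // two columns added instantly after table creation
    return toy_get_offsets(idx.n_core_fields).size() == 3
        && toy_get_offsets(idx.n_fields).size() == 5 ? 0 : 1;
}
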
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -110,7 +110,8 @@ row_undo_mod_clust_low( ut_ad(success); ut_ad(rec_get_trx_id(btr_cur_get_rec(btr_cur), btr_cur_get_index(btr_cur)) - == thr_get_trx(thr)->id); + == thr_get_trx(thr)->id + || btr_cur_get_index(btr_cur)->table->is_temporary()); ut_ad(node->ref != &trx_undo_metadata || node->update->info_bits == REC_INFO_METADATA_ADD || node->update->info_bits == REC_INFO_METADATA_ALTER); @@ -213,8 +214,9 @@ static ulint row_trx_id_offset(const rec_t* rec, const dict_index_t* index) rec_offs_init(offsets_); mem_heap_t* heap = NULL; const ulint trx_id_pos = index->n_uniq ? index->n_uniq : 1; - rec_offs* offsets = rec_get_offsets(rec, index, offsets_, true, - trx_id_pos + 1, &heap); + rec_offs* offsets = rec_get_offsets(rec, index, offsets_, + index->n_core_fields, + trx_id_pos + 1, &heap); ut_ad(!heap); ulint len; trx_id_offset = rec_get_nth_field_offs( @@ -482,9 +484,9 @@ row_undo_mod_clust( } else { ut_ad(index->n_uniq <= MAX_REF_PARTS); rec_offs_init(offsets_); - offsets = rec_get_offsets( - rec, index, offsets_, true, trx_id_pos + 2, - &heap); + offsets = rec_get_offsets(rec, index, offsets_, + index->n_core_fields, + trx_id_pos + 2, &heap); ulint len; trx_id_offset = rec_get_nth_field_offs( offsets, trx_id_pos, &len); @@ -869,7 +871,8 @@ try_again: offsets_heap = NULL; offsets = rec_get_offsets( btr_cur_get_rec(btr_cur), - index, NULL, true, ULINT_UNDEFINED, &offsets_heap); + index, nullptr, index->n_core_fields, ULINT_UNDEFINED, + &offsets_heap); update = row_upd_build_sec_rec_difference_binary( btr_cur_get_rec(btr_cur), index, offsets, entry, heap); if (upd_get_n_fields(update) == 0) { diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc index 6cf41d2422c..11956d11b8d 100644 --- a/storage/innobase/row/row0undo.cc +++ b/storage/innobase/row/row0undo.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -187,7 +187,8 @@ row_undo_search_clust_to_pcur( rec = btr_pcur_get_rec(&node->pcur); - offsets = rec_get_offsets(rec, clust_index, offsets, true, + offsets = rec_get_offsets(rec, clust_index, offsets, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); found = row_get_rec_roll_ptr(rec, clust_index, offsets) @@ -195,7 +196,7 @@ row_undo_search_clust_to_pcur( if (found) { ut_ad(row_get_rec_trx_id(rec, clust_index, offsets) - == node->trx->id); + == node->trx->id || node->table->is_temporary()); if (dict_table_has_atomic_blobs(node->table)) { /* There is no prefix of externally stored diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc index 7c05084d4d9..b594f8a8020 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. 
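
The row0uins.cc, row0umod.cc and row0undo.cc hunks above all weaken their DB_TRX_ID assertions with `|| table->is_temporary()`, suggesting that records of temporary tables are not guaranteed to be stamped with the rolling-back transaction's id. The widened invariant as a standalone predicate; toy_table and the ids are hypothetical stand-ins for the real accessors:

#include <cassert>
#include <cstdint>

struct toy_table { bool temporary; };

// Mirrors the widened assertion: for persistent tables the record must be
// stamped with our transaction id; temporary tables are exempt.
static bool trx_id_invariant(uint64_t rec_trx_id, uint64_t my_trx_id,
                             const toy_table &t)
{
    return rec_trx_id == my_trx_id || t.temporary;
}

int main()
{
    toy_table persistent{false}, temp{true};
    assert(trx_id_invariant(7, 7, persistent));
    assert(!trx_id_invariant(7, 8, persistent));
    assert(trx_id_invariant(7, 8, temp)); // the newly tolerated case
    return 0;
}
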
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -126,25 +126,23 @@ NOTE that since we do not hold dict_sys.latch when leaving the function, it may be that the referencing table has been dropped when we leave this function: this function is only for heuristic use! -@return TRUE if referenced */ +@return true if referenced */ static -ibool +bool row_upd_index_is_referenced( /*========================*/ dict_index_t* index, /*!< in: index */ trx_t* trx) /*!< in: transaction */ { dict_table_t* table = index->table; - ibool froze_data_dict = FALSE; - ibool is_referenced = FALSE; if (table->referenced_set.empty()) { - return(FALSE); + return false; } - if (trx->dict_operation_lock_mode == 0) { + const bool froze_data_dict = !trx->dict_operation_lock_mode; + if (froze_data_dict) { row_mysql_freeze_data_dictionary(trx); - froze_data_dict = TRUE; } dict_foreign_set::iterator it @@ -152,13 +150,13 @@ row_upd_index_is_referenced( table->referenced_set.end(), dict_foreign_with_index(index)); - is_referenced = (it != table->referenced_set.end()); + const bool is_referenced = (it != table->referenced_set.end()); if (froze_data_dict) { row_mysql_unfreeze_data_dictionary(trx); } - return(is_referenced); + return is_referenced; } #ifdef WITH_WSREP @@ -1002,7 +1000,8 @@ row_upd_build_difference_binary( n_diff = 0; if (!offsets) { - offsets = rec_get_offsets(rec, index, offsets_, true, + offsets = rec_get_offsets(rec, index, offsets_, + index->n_core_fields, ULINT_UNDEFINED, &heap); } else { ut_ad(rec_offs_validate(rec, index, offsets)); @@ -2203,7 +2202,8 @@ row_upd_store_row( rec = btr_pcur_get_rec(node->pcur); - offsets = rec_get_offsets(rec, clust_index, offsets_, true, + offsets = rec_get_offsets(rec, clust_index, offsets_, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); if (dict_table_has_atomic_blobs(node->table)) { @@ -2264,7 +2264,6 @@ row_upd_sec_index_entry( dtuple_t* entry; dict_index_t* index; btr_cur_t* btr_cur; - ibool referenced; dberr_t err = DB_SUCCESS; trx_t* trx = thr_get_trx(thr); ulint mode; @@ -2275,7 +2274,7 @@ row_upd_sec_index_entry( index = node->index; - referenced = row_upd_index_is_referenced(index, trx); + const bool referenced = row_upd_index_is_referenced(index, trx); #ifdef WITH_WSREP bool foreign = wsrep_row_upd_index_is_foreign(index, trx); #endif /* WITH_WSREP */ @@ -2306,7 +2305,9 @@ row_upd_sec_index_entry( break; } - if (!index->is_committed()) { + bool uncommitted = !index->is_committed(); + + if (uncommitted) { /* The index->online_status may change if the index is or was being created online, but not committed yet. It is protected by index->lock. */ @@ -2435,7 +2436,7 @@ row_upd_sec_index_entry( && !wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { rec_offs* offsets = rec_get_offsets( - rec, index, NULL, true, + rec, index, NULL, index->n_core_fields, ULINT_UNDEFINED, &heap); err = wsrep_row_upd_check_foreign_constraints( @@ -2478,12 +2479,9 @@ row_upd_sec_index_entry( ut_ad(err == DB_SUCCESS); if (referenced) { - - rec_offs* offsets; - - offsets = rec_get_offsets( - rec, index, NULL, true, ULINT_UNDEFINED, - &heap); + rec_offs* offsets = rec_get_offsets( + rec, index, NULL, index->n_core_fields, + ULINT_UNDEFINED, &heap); /* NOTE that the following call loses the position of pcur ! 
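
row_upd_index_is_referenced() above is also modernized from ibool/TRUE/FALSE to plain bool, with froze_data_dict and is_referenced becoming const locals initialized at their point of definition instead of mutated flags. The same shape in miniature, with std::find standing in for the dict_foreign_set lookup:

#include <algorithm>
#include <vector>

// Stand-in for dict_foreign_with_index(): does any foreign key use idx?
static bool index_is_referenced(const std::vector<int> &referenced_set,
                                int idx)
{
    if (referenced_set.empty())
        return false;
    // const local initialized once, instead of an ibool assigned later
    const bool is_referenced =
        std::find(referenced_set.begin(), referenced_set.end(), idx)
        != referenced_set.end();
    return is_referenced;
}

int main()
{
    const std::vector<int> refs{2, 5};
    return index_is_referenced(refs, 5) && !index_is_referenced(refs, 3)
        ? 0 : 1;
}
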
*/ @@ -2503,11 +2501,38 @@ row_upd_sec_index_entry( mem_heap_empty(heap); + DEBUG_SYNC_C_IF_THD(trx->mysql_thd, + "before_row_upd_sec_new_index_entry"); + + uncommitted = !index->is_committed(); + if (uncommitted) { + mtr.start(); + /* The index->online_status may change if the index is + being rollbacked. It is protected by index->lock. */ + + mtr_s_lock_index(index, &mtr); + + switch (dict_index_get_online_status(index)) { + case ONLINE_INDEX_COMPLETE: + case ONLINE_INDEX_CREATION: + break; + case ONLINE_INDEX_ABORTED: + case ONLINE_INDEX_ABORTED_DROPPED: + mtr_commit(&mtr); + goto func_exit; + } + + } + /* Build a new index entry */ entry = row_build_index_entry(node->upd_row, node->upd_ext, index, heap); ut_a(entry); + if (uncommitted) { + mtr_commit(&mtr); + } + /* Insert new index entry */ err = row_ins_sec_index_entry(index, entry, thr, !node->is_delete); @@ -2653,12 +2678,13 @@ row_upd_clust_rec_by_insert( upd_node_t* node, /*!< in/out: row update node */ dict_index_t* index, /*!< in: clustered index of the record */ que_thr_t* thr, /*!< in: query thread */ - ibool referenced,/*!< in: TRUE if index may be referenced in + bool referenced,/*!< in: whether index may be referenced in a foreign key constraint */ #ifdef WITH_WSREP bool foreign,/*!< in: whether this is a foreign key */ #endif - mtr_t* mtr) /*!< in/out: mtr; gets committed here */ + mtr_t* mtr) /*!< in/out: mini-transaction, + may be committed and restarted */ { mem_heap_t* heap; btr_pcur_t* pcur; @@ -2707,7 +2733,8 @@ row_upd_clust_rec_by_insert( we update the primary key. Delete-mark the old record in the clustered index and prepare to insert a new entry. */ rec = btr_cur_get_rec(btr_cur); - offsets = rec_get_offsets(rec, index, offsets, true, + offsets = rec_get_offsets(rec, index, offsets, + index->n_core_fields, ULINT_UNDEFINED, &heap); ut_ad(page_rec_is_user_rec(rec)); @@ -2728,10 +2755,7 @@ row_upd_clust_rec_by_insert( btr_cur_get_block(btr_cur), rec, index, offsets, thr, node->row, mtr); if (err != DB_SUCCESS) { -err_exit: - mtr_commit(mtr); - mem_heap_free(heap); - return(err); + goto err_exit; } /* If the the new row inherits externally stored @@ -2791,14 +2815,14 @@ check_fk: } } - mtr_commit(mtr); + mtr->commit(); + mtr->start(); + node->state = UPD_NODE_INSERT_CLUSTERED; err = row_ins_clust_index_entry(index, entry, thr, dtuple_get_n_ext(entry)); - node->state = UPD_NODE_INSERT_CLUSTERED; - +err_exit: mem_heap_free(heap); - return(err); } @@ -2818,7 +2842,8 @@ row_upd_clust_rec( mem_heap_t** offsets_heap, /*!< in/out: memory heap, can be emptied */ que_thr_t* thr, /*!< in: query thread */ - mtr_t* mtr) /*!< in: mtr; gets committed here */ + mtr_t* mtr) /*!< in,out: mini-transaction; may be + committed and restarted here */ { mem_heap_t* heap = NULL; big_rec_t* big_rec = NULL; @@ -2864,16 +2889,15 @@ row_upd_clust_rec( goto success; } - mtr_commit(mtr); - if (buf_LRU_buf_pool_running_out()) { - err = DB_LOCK_TABLE_FULL; goto func_exit; } + /* We may have to modify the tree structure: do a pessimistic descent down the index tree */ + mtr->commit(); mtr->start(); if (index->table->is_temporary()) { @@ -2923,7 +2947,6 @@ success: } } - mtr_commit(mtr); func_exit: if (heap) { mem_heap_free(heap); @@ -2948,17 +2971,17 @@ row_upd_del_mark_clust_rec( rec_offs* offsets,/*!< in/out: rec_get_offsets() for the record under the cursor */ que_thr_t* thr, /*!< in: query thread */ - ibool referenced, - /*!< in: TRUE if index may be referenced in + bool referenced, + /*!< in: whether index may be referenced in a foreign key 
constraint */ #ifdef WITH_WSREP bool foreign,/*!< in: whether this is a foreign key */ #endif - mtr_t* mtr) /*!< in: mtr; gets committed here */ + mtr_t* mtr) /*!< in,out: mini-transaction; + will be committed and restarted */ { btr_pcur_t* pcur; btr_cur_t* btr_cur; - dberr_t err; rec_t* rec; trx_t* trx = thr_get_trx(thr); @@ -2974,8 +2997,7 @@ row_upd_del_mark_clust_rec( if (!row_upd_store_row(node, trx->mysql_thd, thr->prebuilt && thr->prebuilt->table == node->table ? thr->prebuilt->m_mysql_table : NULL)) { - err = DB_COMPUTE_VALUE_FAILED; - return err; + return DB_COMPUTE_VALUE_FAILED; } /* Mark the clustered index record deleted; we do not have to check @@ -2983,7 +3005,7 @@ row_upd_del_mark_clust_rec( rec = btr_cur_get_rec(btr_cur); - err = btr_cur_del_mark_set_clust_rec( + dberr_t err = btr_cur_del_mark_set_clust_rec( btr_cur_get_block(btr_cur), rec, index, offsets, thr, node->row, mtr); @@ -3020,8 +3042,6 @@ row_upd_del_mark_clust_rec( #endif /* WITH_WSREP */ } - mtr_commit(mtr); - return(err); } @@ -3038,14 +3058,12 @@ row_upd_clust_step( { dict_index_t* index; btr_pcur_t* pcur; - ibool success; dberr_t err; mtr_t mtr; rec_t* rec; mem_heap_t* heap = NULL; rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; rec_offs* offsets; - ibool referenced; ulint flags; trx_t* trx = thr_get_trx(thr); @@ -3053,8 +3071,7 @@ row_upd_clust_step( index = dict_table_get_first_index(node->table); - referenced = row_upd_index_is_referenced(index, trx); - + const bool referenced = row_upd_index_is_referenced(index, trx); #ifdef WITH_WSREP const bool foreign = wsrep_row_upd_index_is_foreign(index, trx); #endif @@ -3100,14 +3117,9 @@ row_upd_clust_step( mode = BTR_MODIFY_LEAF; } - success = btr_pcur_restore_position(mode, pcur, &mtr); - - if (!success) { + if (!btr_pcur_restore_position(mode, pcur, &mtr)) { err = DB_RECORD_NOT_FOUND; - - mtr_commit(&mtr); - - return(err); + goto exit_func; } /* If this is a row in SYS_INDEXES table of the data dictionary, @@ -3127,19 +3139,14 @@ row_upd_clust_step( mtr.start(); index->set_modified(mtr); - success = btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, - &mtr); - if (!success) { + if (!btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, &mtr)) { err = DB_ERROR; - - mtr.commit(); - - return(err); + goto exit_func; } } rec = btr_pcur_get_rec(pcur); - offsets = rec_get_offsets(rec, index, offsets_, true, + offsets = rec_get_offsets(rec, index, offsets_, index->n_core_fields, ULINT_UNDEFINED, &heap); if (!flags && !node->has_clust_rec_x_lock) { @@ -3147,7 +3154,6 @@ row_upd_clust_step( 0, btr_pcur_get_block(pcur), rec, index, offsets, thr); if (err != DB_SUCCESS) { - mtr.commit(); goto exit_func; } } @@ -3158,8 +3164,6 @@ row_upd_clust_step( btr_pcur_get_block(pcur), page_rec_get_heap_no(rec))); - /* NOTE: the following function calls will also commit mtr */ - if (node->is_delete == PLAIN_DELETE) { err = row_upd_del_mark_clust_rec( node, index, offsets, thr, referenced, @@ -3167,13 +3171,7 @@ row_upd_clust_step( foreign, #endif &mtr); - - if (err == DB_SUCCESS) { - node->state = UPD_NODE_UPDATE_ALL_SEC; - node->index = dict_table_get_next_index(index); - } - - goto exit_func; + goto all_done; } /* If the update is made for MySQL, we already have the update vector @@ -3188,14 +3186,13 @@ row_upd_clust_step( } if (!node->is_delete && node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) { - err = row_upd_clust_rec( flags, node, index, offsets, &heap, thr, &mtr); goto exit_func; } - if(!row_upd_store_row(node, trx->mysql_thd, - thr->prebuilt ? 
thr->prebuilt->m_mysql_table : NULL)) { + if (!row_upd_store_row(node, trx->mysql_thd, thr->prebuilt + ? thr->prebuilt->m_mysql_table : NULL)) { err = DB_COMPUTE_VALUE_FAILED; goto exit_func; } @@ -3220,34 +3217,31 @@ row_upd_clust_step( foreign, #endif &mtr); - if (err != DB_SUCCESS) { - - goto exit_func; +all_done: + if (err == DB_SUCCESS) { + node->state = UPD_NODE_UPDATE_ALL_SEC; +success: + node->index = dict_table_get_next_index(index); } - - node->state = UPD_NODE_UPDATE_ALL_SEC; } else { err = row_upd_clust_rec( flags, node, index, offsets, &heap, thr, &mtr); - if (err != DB_SUCCESS) { - - goto exit_func; + if (err == DB_SUCCESS) { + ut_ad(node->is_delete != PLAIN_DELETE); + node->state = node->is_delete + ? UPD_NODE_UPDATE_ALL_SEC + : UPD_NODE_UPDATE_SOME_SEC; + goto success; } - - ut_ad(node->is_delete != PLAIN_DELETE); - node->state = node->is_delete ? - UPD_NODE_UPDATE_ALL_SEC : - UPD_NODE_UPDATE_SOME_SEC; } - node->index = dict_table_get_next_index(index); - exit_func: - if (heap) { + mtr.commit(); + if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } - return(err); + return err; } /***********************************************************//** diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc index aa2400a91ad..cde4e9e7b89 100644 --- a/storage/innobase/row/row0vers.cc +++ b/storage/innobase/row/row0vers.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -116,7 +116,8 @@ row_vers_impl_x_locked_low( heap = mem_heap_create(1024); clust_offsets = rec_get_offsets(clust_rec, clust_index, clust_offsets_, - true, ULINT_UNDEFINED, &heap); + clust_index->n_core_fields, + ULINT_UNDEFINED, &heap); trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets); if (trx_id == 0) { @@ -239,7 +240,8 @@ not_locked: } clust_offsets = rec_get_offsets( - prev_version, clust_index, clust_offsets_, true, + prev_version, clust_index, clust_offsets_, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); vers_del = rec_get_deleted_flag(prev_version, comp); @@ -569,7 +571,8 @@ row_vers_build_cur_vrow_low( clust_offsets = rec_get_offsets(prev_version, clust_index, NULL, - true, ULINT_UNDEFINED, &heap); + clust_index->n_core_fields, + ULINT_UNDEFINED, &heap); ulint entry_len = dict_index_get_n_fields(index); @@ -711,7 +714,8 @@ row_vers_vc_matches_cluster( clust_offsets = rec_get_offsets(prev_version, clust_index, NULL, - true, ULINT_UNDEFINED, &heap); + clust_index->n_core_fields, + ULINT_UNDEFINED, &heap); ulint entry_len = dict_index_get_n_fields(index); @@ -849,7 +853,8 @@ row_vers_build_cur_vrow( index, roll_ptr, trx_id, v_heap, &cur_vrow, mtr); } - *clust_offsets = rec_get_offsets(rec, clust_index, NULL, true, + *clust_offsets = rec_get_offsets(rec, clust_index, NULL, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); return(cur_vrow); } @@ -906,7 +911,8 @@ row_vers_old_has_index_entry( comp = page_rec_is_comp(rec); ut_ad(!dict_table_is_comp(index->table) == !comp); heap = mem_heap_create(1024); - clust_offsets = rec_get_offsets(rec, clust_index, NULL, true, + clust_offsets = rec_get_offsets(rec, clust_index, NULL, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); if 
(dict_index_has_virtual(index)) { @@ -995,7 +1001,8 @@ row_vers_old_has_index_entry( } } clust_offsets = rec_get_offsets(rec, clust_index, NULL, - true, + clust_index + ->n_core_fields, ULINT_UNDEFINED, &heap); } else { @@ -1074,7 +1081,8 @@ unsafe_to_purge: } clust_offsets = rec_get_offsets(prev_version, clust_index, - NULL, true, + NULL, + clust_index->n_core_fields, ULINT_UNDEFINED, &heap); if (dict_index_has_virtual(index)) { @@ -1215,7 +1223,7 @@ row_vers_build_for_consistent_read( *offsets = rec_get_offsets( prev_version, index, *offsets, - true, ULINT_UNDEFINED, offset_heap); + index->n_core_fields, ULINT_UNDEFINED, offset_heap); #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(prev_version, *offsets)); @@ -1331,11 +1339,10 @@ committed_version_trx: semi-consistent read. */ version = rec; - *offsets = rec_get_offsets(version, - index, *offsets, - true, - ULINT_UNDEFINED, - offset_heap); + *offsets = rec_get_offsets( + version, index, *offsets, + index->n_core_fields, ULINT_UNDEFINED, + offset_heap); } buf = static_cast<byte*>( @@ -1378,7 +1385,8 @@ committed_version_trx: } version = prev_version; - *offsets = rec_get_offsets(version, index, *offsets, true, + *offsets = rec_get_offsets(version, index, *offsets, + index->n_core_fields, ULINT_UNDEFINED, offset_heap); #if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG ut_a(!rec_offs_any_null_extern(version, *offsets)); diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index a5305684935..d47b33ee851 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -3,7 +3,7 @@ Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -248,9 +248,6 @@ ulong srv_buf_pool_load_pages_abort = LONG_MAX; /** Lock table size in bytes */ ulint srv_lock_table_size = ULINT_MAX; -/** innodb_idle_flush_pct */ -ulong srv_idle_flush_pct; - /** innodb_read_io_threads */ ulong srv_n_read_io_threads; /** innodb_write_io_threads */ @@ -484,9 +481,6 @@ current_time % 5 != 0. */ #endif /* MEM_PERIODIC_CHECK */ # define SRV_MASTER_DICT_LRU_INTERVAL (47) -/** Simulate compression failures. */ -UNIV_INTERN uint srv_simulate_comp_failures; - /** Buffer pool dump status frequence in percentages */ UNIV_INTERN ulong srv_buf_dump_status_frequency; diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 746935f5794..46c7dc785c8 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -3,7 +3,7 @@ Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -2005,7 +2005,7 @@ files_checked: to the data files and truncate or delete the log. Unless --export is specified, no further change to InnoDB files is needed. 
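
The row0upd.cc hunks further up consolidate the scattered mtr_commit() calls of row_upd_clust_rec_by_insert(), row_upd_clust_rec() and row_upd_clust_step() into single exit paths (err_exit, exit_func), and the parameter comments change from "mtr; gets committed here" to "may be committed and restarted". A toy illustration of the single-exit pattern that guarantees exactly one commit per invocation, using a hypothetical mini-transaction type:

#include <cassert>

struct toy_mtr {
    bool active = false;
    void start()  { assert(!active); active = true; }
    void commit() { assert(active);  active = false; }
};

// Every failure path funnels through the same exit label, so the
// mini-transaction is committed exactly once however we leave.
static int do_update(toy_mtr &mtr, bool restore_ok, bool lock_ok)
{
    int err = 0;
    mtr.start();
    if (!restore_ok) {
        err = 1;
        goto exit_func;
    }
    if (!lock_ok) {
        err = 2;
        goto exit_func;
    }
exit_func:
    mtr.commit();
    return err;
}

int main()
{
    toy_mtr mtr;
    assert(do_update(mtr, true, true) == 0 && !mtr.active);
    assert(do_update(mtr, false, true) == 1 && !mtr.active);
    return 0;
}
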
*/ - ut_ad(!srv_force_recovery); + ut_ad(srv_force_recovery <= SRV_FORCE_IGNORE_CORRUPT); ut_ad(srv_n_log_files_found <= 1); ut_ad(recv_no_log_write); buf_flush_sync_all_buf_pools(); @@ -2546,6 +2546,19 @@ void innodb_shutdown() sync_check_close(); + srv_sys_space.shutdown(); + if (srv_tmp_space.get_sanity_check_status()) { + if (fil_system.temp_space) { + fil_system.temp_space->close(); + } + srv_tmp_space.delete_files(); + } + srv_tmp_space.shutdown(); + +#ifdef WITH_INNODB_DISALLOW_WRITES + os_event_destroy(srv_allow_writes_event); +#endif /* WITH_INNODB_DISALLOW_WRITES */ + if (srv_was_started && srv_print_verbose_log) { ib::info() << "Shutdown completed; log sequence number " << srv_shutdown_lsn diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc index c7613b618a1..b25476861a5 100644 --- a/storage/innobase/trx/trx0i_s.cc +++ b/storage/innobase/trx/trx0i_s.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2019, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -710,7 +710,8 @@ fill_lock_data( ut_a(n_fields > 0); heap = NULL; - offsets = rec_get_offsets(rec, index, offsets, true, n_fields, &heap); + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, + n_fields, &heap); /* format and store the data */ @@ -1258,13 +1259,16 @@ static void fetch_data_into_cache(trx_i_s_cache_t *cache) /* Capture the state of transactions */ mutex_enter(&trx_sys.mutex); - for (const trx_t *trx= UT_LIST_GET_FIRST(trx_sys.trx_list); + for (trx_t *trx= UT_LIST_GET_FIRST(trx_sys.trx_list); trx != NULL; trx= UT_LIST_GET_NEXT(trx_list, trx)) { - if (trx_is_started(trx) && trx != purge_sys.query->trx) + if (trx->state != TRX_STATE_NOT_STARTED && trx != purge_sys.query->trx) { - fetch_data_into_cache_low(cache, trx); + mutex_enter(&trx->mutex); + if (trx->state != TRX_STATE_NOT_STARTED) + fetch_data_into_cache_low(cache, trx); + mutex_exit(&trx->mutex); if (cache->is_truncated) break; } diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc index fee96c44479..cd520f4f5f2 100644 --- a/storage/innobase/trx/trx0rec.cc +++ b/storage/innobase/trx/trx0rec.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2019, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. 
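
fetch_data_into_cache() above now re-reads trx->state after acquiring trx->mutex: the unlocked test is only a cheap filter, and the decision to snapshot the transaction is made under the mutex, avoiding a race with a transaction finishing in between. A sketch of this check-lock-recheck pattern with std::mutex standing in for trx->mutex:

#include <cassert>
#include <mutex>

enum class trx_state { not_started, active };

struct toy_trx {
    std::mutex m;
    trx_state state = trx_state::active;
};

static int snapshots = 0;
static void snapshot(const toy_trx &) { ++snapshots; }

// Cheap unlocked test first, then recheck under the mutex: the state may
// change between the two reads, and only the locked read is decisive.
static void maybe_snapshot(toy_trx &trx)
{
    if (trx.state != trx_state::not_started) {
        std::lock_guard<std::mutex> g(trx.m);
        if (trx.state != trx_state::not_started)
            snapshot(trx);
    }
}

int main()
{
    toy_trx trx;
    maybe_snapshot(trx);
    trx.state = trx_state::not_started;
    maybe_snapshot(trx); // filtered out by the unlocked test
    return snapshots == 1 ? 0 : 1;
}
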
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2523,7 +2523,8 @@ trx_undo_prev_version_build( rec_offs offsets_dbg[REC_OFFS_NORMAL_SIZE]; rec_offs_init(offsets_dbg); ut_a(!rec_offs_any_null_extern( - *old_vers, rec_get_offsets(*old_vers, index, offsets_dbg, true, + *old_vers, rec_get_offsets(*old_vers, index, offsets_dbg, + index->n_core_fields, ULINT_UNDEFINED, &heap))); #endif // defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc index 3e29ad838c9..ed1499e1392 100644 --- a/storage/innobase/trx/trx0rseg.cc +++ b/storage/innobase/trx/trx0rseg.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -443,8 +443,13 @@ static void trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr) { - trx_rsegf_t* rseg_header = trx_rsegf_get_new( - rseg->space->id, rseg->page_no, mtr); + /* This is based on trx_rsegf_get_new(). + We need to access buf_block_t. */ + buf_block_t *block = buf_page_get( + page_id_t(rseg->space->id, rseg->page_no), 0, RW_S_LATCH, mtr); + buf_block_dbg_add_level(block, SYNC_RSEG_HEADER_NEW); + + const trx_rsegf_t* rseg_header = TRX_RSEG + block->frame; if (mach_read_from_4(rseg_header + TRX_RSEG_FORMAT) == 0) { trx_id_t id = mach_read_from_8(rseg_header @@ -455,32 +460,20 @@ trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr) } if (rseg_header[TRX_RSEG_BINLOG_NAME]) { - const char* binlog_name = reinterpret_cast<const char*> - (rseg_header) + TRX_RSEG_BINLOG_NAME; + lsn_t lsn = std::max(block->page.newest_modification, + mach_read_from_8(FIL_PAGE_LSN + + block->frame)); compile_time_assert(TRX_RSEG_BINLOG_NAME_LEN == sizeof trx_sys.recovered_binlog_filename); - - int cmp = *trx_sys.recovered_binlog_filename - ? strncmp(binlog_name, - trx_sys.recovered_binlog_filename, - TRX_RSEG_BINLOG_NAME_LEN) - : 1; - - if (cmp >= 0) { - uint64_t binlog_offset = mach_read_from_8( - rseg_header + TRX_RSEG_BINLOG_OFFSET); - if (cmp) { - memcpy(trx_sys. - recovered_binlog_filename, - binlog_name, - TRX_RSEG_BINLOG_NAME_LEN); - trx_sys.recovered_binlog_offset - = binlog_offset; - } else if (binlog_offset > - trx_sys.recovered_binlog_offset) { - trx_sys.recovered_binlog_offset - = binlog_offset; - } + if (lsn > trx_sys.recovered_binlog_lsn) { + trx_sys.recovered_binlog_lsn = lsn; + trx_sys.recovered_binlog_offset + = mach_read_from_8( + rseg_header + + TRX_RSEG_BINLOG_OFFSET); + memcpy(trx_sys.recovered_binlog_filename, + rseg_header + TRX_RSEG_BINLOG_NAME, + TRX_RSEG_BINLOG_NAME_LEN); } #ifdef WITH_WSREP diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc index 87e85b85939..f59c1f96693 100644 --- a/storage/innobase/trx/trx0sys.cc +++ b/storage/innobase/trx/trx0sys.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. 
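
trx_rseg_mem_restore() above stops comparing recovered binlog file names as strings and instead keeps the binlog metadata from the rollback-segment header page with the highest LSN, taken as the maximum of the in-memory newest_modification and the on-page FIL_PAGE_LSN. A toy reduction of that keep-the-newest selection, with simplified stand-in types:

#include <algorithm>
#include <cstdint>
#include <string>

struct rseg_page {
    uint64_t newest_modification; // buffer-pool LSN, may be 0 if clean
    uint64_t page_lsn;            // FIL_PAGE_LSN as last written to disk
    std::string binlog_name;
    uint64_t binlog_offset;
};

struct recovered {
    uint64_t lsn = 0;
    std::string binlog_name;
    uint64_t binlog_offset = 0;
};

// Keep the metadata from whichever rollback-segment header was written last.
static void apply(recovered &out, const rseg_page &p)
{
    const uint64_t lsn = std::max(p.newest_modification, p.page_lsn);
    if (lsn > out.lsn) {
        out.lsn = lsn;
        out.binlog_name = p.binlog_name;
        out.binlog_offset = p.binlog_offset;
    }
}

int main()
{
    recovered r;
    apply(r, {0, 100, "binlog.000001", 4});
    apply(r, {250, 200, "binlog.000002", 7}); // newest page wins
    apply(r, {0, 150, "binlog.000001", 9});   // older page ignored
    return r.binlog_name == "binlog.000002" && r.binlog_offset == 7 ? 0 : 1;
}
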
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 01cb842cc0b..1bcc92f8b97 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -161,6 +161,11 @@ trx_init( trx->lock.rec_cached = 0; trx->lock.table_cached = 0; +#ifdef WITH_WSREP + ut_ad(!trx->wsrep); + ut_ad(!trx->wsrep_event); + ut_ad(!trx->wsrep_UK_scan); +#endif /* WITH_WSREP */ ut_ad(trx->get_flush_observer() == NULL); } @@ -369,6 +374,7 @@ trx_t *trx_create() #ifdef WITH_WSREP trx->wsrep_event= NULL; + ut_ad(!trx->wsrep_UK_scan); #endif /* WITH_WSREP */ trx_sys.register_trx(trx); @@ -414,9 +420,11 @@ void trx_t::free() /* do not poison mutex */ MEM_NOACCESS(&id, sizeof id); MEM_NOACCESS(&no, sizeof no); - /* state is accessed by innobase_kill_connection() */ + MEM_NOACCESS(&state, sizeof state); MEM_NOACCESS(&is_recovered, sizeof is_recovered); - /* wsrep is accessed by innobase_kill_connection() */ +#ifdef WITH_WSREP + MEM_NOACCESS(&wsrep, sizeof wsrep); +#endif MEM_NOACCESS(&read_view, sizeof read_view); MEM_NOACCESS(&trx_list, sizeof trx_list); MEM_NOACCESS(&lock, sizeof lock); @@ -437,7 +445,7 @@ void trx_t::free() MEM_NOACCESS(&start_time_micro, sizeof start_time_micro); MEM_NOACCESS(&commit_lsn, sizeof commit_lsn); MEM_NOACCESS(&table_id, sizeof table_id); - /* mysql_thd is accessed by innobase_kill_connection() */ + MEM_NOACCESS(&mysql_thd, sizeof mysql_thd); MEM_NOACCESS(&mysql_log_file_name, sizeof mysql_log_file_name); MEM_NOACCESS(&mysql_log_offset, sizeof mysql_log_offset); MEM_NOACCESS(&n_mysql_tables_in_use, sizeof n_mysql_tables_in_use); @@ -473,6 +481,8 @@ void trx_t::free() MEM_NOACCESS(&flush_observer, sizeof flush_observer); #ifdef WITH_WSREP MEM_NOACCESS(&wsrep_event, sizeof wsrep_event); + ut_ad(!wsrep_UK_scan); + MEM_NOACCESS(&wsrep_UK_scan, sizeof wsrep_UK_scan); #endif /* WITH_WSREP */ MEM_NOACCESS(&magic_n, sizeof magic_n); trx_pools->mem_free(this); @@ -1262,16 +1272,6 @@ trx_update_mod_tables_timestamp( const time_t now = time(NULL); trx_mod_tables_t::const_iterator end = trx->mod_tables.end(); -#ifdef UNIV_DEBUG - const bool preserve_tables = !innodb_evict_tables_on_commit_debug - || trx->is_recovered /* avoid trouble with XA recovery */ -# if 1 /* if dict_stats_exec_sql() were not playing dirty tricks */ - || mutex_own(&dict_sys.mutex) -# else /* this would be more proper way to do it */ - || trx->dict_operation_lock_mode || trx->dict_operation -# endif - ; -#endif for (trx_mod_tables_t::const_iterator it = trx->mod_tables.begin(); it != end; @@ -1287,26 +1287,6 @@ trx_update_mod_tables_timestamp( intrusive. 
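
trx_t::free() above extends the MEM_NOACCESS poisoning to state, wsrep and mysql_thd, fields that were previously left readable for innobase_kill_connection(). A hedged sketch of the underlying mechanism using AddressSanitizer's poisoning API directly; MariaDB's MEM_NOACCESS macro wraps this and the Valgrind equivalent, and the toy_trx type and POISON macro here are illustrative only:

#include <cstddef>

#if defined(__has_feature)
# if __has_feature(address_sanitizer)
#  define HAVE_TOY_ASAN 1
# endif
#elif defined(__SANITIZE_ADDRESS__)
# define HAVE_TOY_ASAN 1
#endif

#ifdef HAVE_TOY_ASAN
# include <sanitizer/asan_interface.h>
# define POISON(p, n) ASAN_POISON_MEMORY_REGION(p, n)
#else
# define POISON(p, n) ((void) (p), (void) (n)) /* no-op without ASan */
#endif

struct toy_trx {
    int   state;
    void *mysql_thd;
};

static toy_trx trx_pool[1]; // stand-in for the real trx pool

// When a trx goes back to the pool, every field nobody may touch any more
// is poisoned; a stale reader then trips ASan instead of silently reading
// freed data.
static void toy_trx_free(toy_trx *trx)
{
    POISON(&trx->state, sizeof trx->state);
    POISON(&trx->mysql_thd, sizeof trx->mysql_thd);
}

int main()
{
    trx_pool[0].state = 1;
    toy_trx_free(&trx_pool[0]);
    return 0; // reading trx_pool[0].state now would be an ASan error
}
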
*/ dict_table_t* table = it->first; table->update_time = now; -#ifdef UNIV_DEBUG - if (preserve_tables || table->get_ref_count() - || UT_LIST_GET_LEN(table->locks)) { - /* do not evict when committing DDL operations - or if some other transaction is holding the - table handle */ - continue; - } - /* recheck while holding the mutex that blocks - table->acquire() */ - mutex_enter(&dict_sys.mutex); - mutex_enter(&lock_sys.mutex); - const bool do_evict = !table->get_ref_count() - && !UT_LIST_GET_LEN(table->locks); - mutex_exit(&lock_sys.mutex); - if (do_evict) { - dict_sys.remove(table, true); - } - mutex_exit(&dict_sys.mutex); -#endif } trx->mod_tables.clear(); @@ -1392,16 +1372,9 @@ inline void trx_t::commit_in_memory(const mtr_t *mtr) so that there will be no race condition in lock_release(). */ while (UNIV_UNLIKELY(is_referenced())) ut_delay(srv_spin_wait_delay); - release_locks(); - id= 0; } else - { ut_ad(read_only || !rsegs.m_redo.rseg); - release_locks(); - } - - DEBUG_SYNC_C("after_trx_committed_in_memory"); if (read_only || !rsegs.m_redo.rseg) { @@ -1414,6 +1387,10 @@ inline void trx_t::commit_in_memory(const mtr_t *mtr) is_recovered= false; } + release_locks(); + id= 0; + DEBUG_SYNC_C("after_trx_committed_in_memory"); + while (dict_table_t *table= UT_LIST_GET_FIRST(lock.evicted_tables)) { UT_LIST_REMOVE(lock.evicted_tables, table); diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index fc2fbb7f240..a6a8661f699 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, 2020, MariaDB Corporation. +Copyright (c) 2017, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -38,6 +38,9 @@ Created 5/11/1994 Heikki Tuuri #include <string> #include "log.h" #include "my_cpu.h" +#ifndef DBUG_OFF +#include "rem0rec.h" +#endif /**********************************************************//** Returns the number of milliseconds since some epoch. 
The @@ -625,4 +628,49 @@ fatal_or_error::~fatal_or_error() } // namespace ib +#ifndef DBUG_OFF +static char dbug_print_buf[1024]; + +const char * dbug_print_rec(const rec_t* rec, const rec_offs* offsets) +{ + rec_printer r(rec, offsets); + strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1); + return dbug_print_buf; +} + +const char * dbug_print_rec(const rec_t* rec, ulint info, const rec_offs* offsets) +{ + rec_printer r(rec, info, offsets); + strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1); + return dbug_print_buf; +} + +const char * dbug_print_rec(const dtuple_t* tuple) +{ + rec_printer r(tuple); + strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1); + return dbug_print_buf; +} + +const char * dbug_print_rec(const dfield_t* field, ulint n) +{ + rec_printer r(field, n); + strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1); + return dbug_print_buf; +} + +const char * dbug_print_rec(const rec_t* rec, dict_index_t* index) +{ + rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; + rec_offs* offsets = offsets_; + rec_offs_init(offsets_); + mem_heap_t* tmp_heap = NULL; + offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields, + ULINT_UNDEFINED, &tmp_heap); + rec_printer r(rec, offsets); + strmake(dbug_print_buf, r.str().c_str(), sizeof(dbug_print_buf) - 1); + return dbug_print_buf; +} +#endif /* !DBUG_OFF */ + #endif /* !UNIV_INNOCHECKSUM */ diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index 1f734439ffe..aa99bc157c9 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -2523,9 +2523,6 @@ int ha_maria::info(uint flag) MARIA_INFO maria_info; char name_buff[FN_REFLEN]; - if (!table) - return 0; - (void) maria_status(file, &maria_info, flag); if (flag & HA_STATUS_VARIABLE) { diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c index fadf04861fa..4f3a2ae5f89 100644 --- a/storage/maria/ma_bitmap.c +++ b/storage/maria/ma_bitmap.c @@ -232,15 +232,16 @@ my_bool _ma_bitmap_init(MARIA_SHARE *share, File file, uint max_page_size; MARIA_FILE_BITMAP *bitmap= &share->bitmap; uint size= share->block_size; + myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); pgcache_page_no_t first_bitmap_with_space; #ifndef DBUG_OFF /* We want to have a copy of the bitmap to be able to print differences */ size*= 2; #endif - if (((bitmap->map= (uchar*) my_malloc(size, MYF(MY_WME))) == NULL) || + if (((bitmap->map= (uchar*) my_malloc(size, flag)) == NULL) || my_init_dynamic_array(&bitmap->pinned_pages, - sizeof(MARIA_PINNED_PAGE), 1, 1, MYF(0))) + sizeof(MARIA_PINNED_PAGE), 1, 1, flag)) return 1; bitmap->share= share; diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c index a82637e2b82..592aab6da41 100644 --- a/storage/maria/ma_blockrec.c +++ b/storage/maria/ma_blockrec.c @@ -485,10 +485,11 @@ my_bool _ma_init_block_record(MARIA_HA *info) { MARIA_ROW *row= &info->cur_row, *new_row= &info->new_row; MARIA_SHARE *share= info->s; + myf flag= MY_WME | (share->temporary ? 
MY_THREAD_SPECIFIC : 0); uint default_extents; DBUG_ENTER("_ma_init_block_record"); - if (!my_multi_malloc(MY_WME, + if (!my_multi_malloc(flag, &row->empty_bits, share->base.pack_bytes, &row->field_lengths, share->base.max_field_lengths + 2, @@ -527,13 +528,12 @@ my_bool _ma_init_block_record(MARIA_HA *info) FULL_PAGE_SIZE(share) / BLOB_SEGMENT_MIN_SIZE)); - if (my_init_dynamic_array(&info->bitmap_blocks, - sizeof(MARIA_BITMAP_BLOCK), default_extents, - 64, MYF(0))) + if (my_init_dynamic_array(&info->bitmap_blocks, sizeof(MARIA_BITMAP_BLOCK), + default_extents, 64, flag)) goto err; info->cur_row.extents_buffer_length= default_extents * ROW_EXTENT_SIZE; if (!(info->cur_row.extents= my_malloc(info->cur_row.extents_buffer_length, - MYF(MY_WME)))) + flag))) goto err; info->row_base_length= share->base_length; @@ -2642,6 +2642,7 @@ static my_bool write_block_record(MARIA_HA *info, LSN lsn; my_off_t position; uint save_my_errno; + myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("write_block_record"); head_block= bitmap_blocks->block; @@ -2708,7 +2709,7 @@ static my_bool write_block_record(MARIA_HA *info, for every data segment we want to store. */ if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size, - row->head_length)) + row->head_length, myflag)) DBUG_RETURN(1); tmp_data_used= 0; /* Either 0 or last used uchar in 'data' */ @@ -4718,6 +4719,7 @@ int _ma_read_block_record2(MARIA_HA *info, uchar *record, MARIA_EXTENT_CURSOR extent; MARIA_COLUMNDEF *column, *end_column; MARIA_ROW *cur_row= &info->cur_row; + myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("_ma_read_block_record2"); start_of_data= data; @@ -4763,7 +4765,7 @@ int _ma_read_block_record2(MARIA_HA *info, uchar *record, if (cur_row->extents_buffer_length < row_extent_size && _ma_alloc_buffer(&cur_row->extents, &cur_row->extents_buffer_length, - row_extent_size)) + row_extent_size, myflag)) DBUG_RETURN(my_errno); memcpy(cur_row->extents, data, ROW_EXTENT_SIZE); data+= ROW_EXTENT_SIZE; @@ -4944,7 +4946,7 @@ int _ma_read_block_record2(MARIA_HA *info, uchar *record, cur_row->blob_length= blob_lengths; DBUG_PRINT("info", ("Total blob length: %lu", blob_lengths)); if (_ma_alloc_buffer(&info->blob_buff, &info->blob_buff_size, - blob_lengths)) + blob_lengths, myflag)) DBUG_RETURN(my_errno); blob_buffer= info->blob_buff; } @@ -5050,6 +5052,7 @@ static my_bool read_row_extent_info(MARIA_HA *info, uchar *buff, uint flag, row_extents, row_extents_size; uint field_lengths __attribute__ ((unused)); uchar *extents, *end; + myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("read_row_extent_info"); if (!(data= get_record_position(share, buff, @@ -5073,7 +5076,7 @@ static my_bool read_row_extent_info(MARIA_HA *info, uchar *buff, if (info->cur_row.extents_buffer_length < row_extents_size && _ma_alloc_buffer(&info->cur_row.extents, &info->cur_row.extents_buffer_length, - row_extents_size)) + row_extents_size, myflag)) DBUG_RETURN(1); memcpy(info->cur_row.extents, data, ROW_EXTENT_SIZE); data+= ROW_EXTENT_SIZE; @@ -5244,6 +5247,7 @@ my_bool _ma_cmp_block_unique(MARIA_HA *info, MARIA_UNIQUEDEF *def, my_bool _ma_scan_init_block_record(MARIA_HA *info) { MARIA_SHARE *share= info->s; + myf flag= MY_WME | (share->temporary ? 
MY_THREAD_SPECIFIC : 0); DBUG_ENTER("_ma_scan_init_block_record"); DBUG_ASSERT(info->dfile.file == share->bitmap.file.file); @@ -5253,7 +5257,7 @@ my_bool _ma_scan_init_block_record(MARIA_HA *info) */ if (!(info->scan.bitmap_buff || ((info->scan.bitmap_buff= - (uchar *) my_malloc(share->block_size * 2, MYF(MY_WME)))))) + (uchar *) my_malloc(share->block_size * 2, flag))))) DBUG_RETURN(1); info->scan.page_buff= info->scan.bitmap_buff + share->block_size; info->scan.bitmap_end= info->scan.bitmap_buff + share->bitmap.max_total_size; diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index 26a9241c205..e1722d7f2a8 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -1271,6 +1271,7 @@ static int check_dynamic_record(HA_CHECK *param, MARIA_HA *info, int extend, ulong UNINIT_VAR(left_length); uint b_type; char llbuff[22],llbuff2[22],llbuff3[22]; + myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("check_dynamic_record"); pos= 0; @@ -1378,7 +1379,7 @@ static int check_dynamic_record(HA_CHECK *param, MARIA_HA *info, int extend, { if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size, block_info.rec_len + - share->base.extra_rec_buff_size)) + share->base.extra_rec_buff_size, myflag)) { _ma_check_print_error(param, @@ -2694,7 +2695,7 @@ int maria_repair(HA_CHECK *param, register MARIA_HA *info, (uchar *) my_malloc((uint) share->base.default_rec_buff_size, MYF(0))) || _ma_alloc_buffer(&sort_param.rec_buff, &sort_param.rec_buff_size, - share->base.default_rec_buff_size)) + share->base.default_rec_buff_size, MYF(0))) { _ma_check_print_error(param, "Not enough memory for extra record"); goto err; @@ -3782,7 +3783,7 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info, (uchar*) my_malloc((size_t) share->base.default_rec_buff_size, MYF(0))) || _ma_alloc_buffer(&sort_param.rec_buff, &sort_param.rec_buff_size, - share->base.default_rec_buff_size)) + share->base.default_rec_buff_size, MYF(0))) { _ma_check_print_error(param, "Not enough memory for extra record"); goto err; @@ -4425,7 +4426,7 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info, sort_param[i].record= (((uchar *)(sort_param+share->base.keys))+ (share->base.pack_reclength * i)); if (_ma_alloc_buffer(&sort_param[i].rec_buff, &sort_param[i].rec_buff_size, - share->base.default_rec_buff_size)) + share->base.default_rec_buff_size, MYF(0))) { _ma_check_print_error(param,"Not enough memory!"); goto err; @@ -5155,7 +5156,7 @@ static int sort_get_next_record(MARIA_SORT_PARAM *sort_param) if (_ma_alloc_buffer(&sort_param->rec_buff, &sort_param->rec_buff_size, block_info.rec_len + - share->base.extra_rec_buff_size)) + share->base.extra_rec_buff_size, MYF(0))) { if (param->max_record_length >= block_info.rec_len) diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c index ba147f08b45..85e158a9fd6 100644 --- a/storage/maria/ma_create.c +++ b/storage/maria/ma_create.c @@ -64,10 +64,10 @@ int maria_create(const char *name, enum data_file_type datafile_type, uint uniques, MARIA_UNIQUEDEF *uniquedefs, MARIA_CREATE_INFO *ci,uint flags) { - register uint i,j; + uint i,j; File UNINIT_VAR(dfile), UNINIT_VAR(file); int errpos,save_errno, create_mode= O_RDWR | O_TRUNC, res; - myf create_flag; + myf create_flag, common_flag= MY_WME, sync_dir= 0; uint length,max_key_length,packed,pack_bytes,pointer,real_length_diff, key_length,info_length,key_segs,options,min_key_length, base_pos,long_varchar_count, @@ -93,7 +93,6 @@ int maria_create(const char *name, enum 
data_file_type datafile_type, MARIA_CREATE_INFO tmp_create_info; my_bool tmp_table= FALSE; /* cache for presence of HA_OPTION_TMP_TABLE */ my_bool forced_packed; - myf sync_dir= 0; uchar *log_data= NULL; my_bool encrypted= maria_encrypt_tables && datafile_type == BLOCK_RECORD; my_bool insert_order= MY_TEST(flags & HA_PRESERVE_INSERT_ORDER); @@ -104,6 +103,9 @@ int maria_create(const char *name, enum data_file_type datafile_type, DBUG_ASSERT(maria_inited); + if (flags & HA_CREATE_TMP_TABLE) + common_flag|= MY_THREAD_SPECIFIC; + if (!ci) { bzero((char*) &tmp_create_info,sizeof(tmp_create_info)); @@ -148,7 +150,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, (double*) my_malloc((keys + uniques)*HA_MAX_KEY_SEG*sizeof(double) + (keys + uniques)*HA_MAX_KEY_SEG*sizeof(ulong) + sizeof(uint16) * columns, - MYF(MY_WME | MY_ZEROFILL)))) + MYF(common_flag | MY_ZEROFILL)))) DBUG_RETURN(my_errno); nulls_per_key_part= (ulong*) (rec_per_key_part + (keys + uniques) * HA_MAX_KEY_SEG); @@ -924,7 +926,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, if ((file= mysql_file_create_with_symlink(key_file_kfile, klinkname_ptr, kfilename, 0, create_mode, - MYF(MY_WME|create_flag))) < 0) + MYF(common_flag|create_flag))) < 0) goto err; errpos=1; @@ -1027,7 +1029,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, MARIA_COLUMNDEF **col_order, **pos; if (!(col_order= (MARIA_COLUMNDEF**) my_malloc(share.base.fields * sizeof(MARIA_COLUMNDEF*), - MYF(MY_WME)))) + common_flag))) goto err; for (column= columndef, pos= col_order ; column != end_column ; @@ -1206,8 +1208,8 @@ int maria_create(const char *name, enum data_file_type datafile_type, } if ((dfile= mysql_file_create_with_symlink(key_file_dfile, dlinkname_ptr, - dfilename, 0, create_mode, - MYF(MY_WME | create_flag | sync_dir))) < 0) + dfilename, 0, create_mode, + MYF(common_flag | create_flag | sync_dir))) < 0) goto err; errpos=3; diff --git a/storage/maria/ma_crypt.c b/storage/maria/ma_crypt.c index 95b84d38221..564edb4bbe8 100644 --- a/storage/maria/ma_crypt.c +++ b/storage/maria/ma_crypt.c @@ -268,7 +268,7 @@ static my_bool ma_crypt_data_pre_write_hook(PAGECACHE_IO_HOOK_ARGS *args) return 1; } - if (!share->now_transactional) + if (!share->base.born_transactional) { /* store a random number instead of LSN (for counter block) */ store_rand_lsn(args->page); @@ -392,7 +392,7 @@ static my_bool ma_crypt_index_pre_write_hook(PAGECACHE_IO_HOOK_ARGS *args) return 1; } - if (!share->now_transactional) + if (!share->base.born_transactional) { /* store a random number instead of LSN (for counter block) */ store_rand_lsn(args->page); diff --git a/storage/maria/ma_dynrec.c b/storage/maria/ma_dynrec.c index ae6fc57c114..829e5b5cd02 100644 --- a/storage/maria/ma_dynrec.c +++ b/storage/maria/ma_dynrec.c @@ -1477,6 +1477,8 @@ int _ma_read_dynamic_record(MARIA_HA *info, uchar *buf, File file; uchar *UNINIT_VAR(to); uint UNINIT_VAR(left_length); + MARIA_SHARE *share= info->s; + myf flag= MY_WME | (share->temporary ? 
MY_THREAD_SPECIFIC : 0); DBUG_ENTER("_ma_read_dynamic_record"); if (filepos == HA_OFFSET_ERROR) @@ -1507,13 +1509,13 @@ int _ma_read_dynamic_record(MARIA_HA *info, uchar *buf, if (block_of_record++ == 0) /* First block */ { info->cur_row.total_length= block_info.rec_len; - if (block_info.rec_len > (uint) info->s->base.max_pack_length) + if (block_info.rec_len > (uint) share->base.max_pack_length) goto panic; - if (info->s->base.blobs) + if (share->base.blobs) { if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size, block_info.rec_len + - info->s->base.extra_rec_buff_size)) + share->base.extra_rec_buff_size, flag)) goto err; } to= info->rec_buff; @@ -1549,7 +1551,7 @@ int _ma_read_dynamic_record(MARIA_HA *info, uchar *buf, there is no equivalent without seeking. We are at the right position already. :( */ - if (info->s->file_read(info, to, block_info.data_len, + if (share->file_read(info, to, block_info.data_len, filepos, MYF(MY_NABP))) goto panic; left_length-=block_info.data_len; @@ -1769,6 +1771,7 @@ int _ma_read_rnd_dynamic_record(MARIA_HA *info, uchar *UNINIT_VAR(to); MARIA_BLOCK_INFO block_info; MARIA_SHARE *share= info->s; + myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("_ma_read_rnd_dynamic_record"); #ifdef MARIA_EXTERNAL_LOCKING @@ -1859,7 +1862,7 @@ int _ma_read_rnd_dynamic_record(MARIA_HA *info, { if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size, block_info.rec_len + - info->s->base.extra_rec_buff_size)) + share->base.extra_rec_buff_size, flag)) goto err; } to= info->rec_buff; diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c index b464cf4f94e..2573133ece5 100644 --- a/storage/maria/ma_extra.c +++ b/storage/maria/ma_extra.c @@ -533,6 +533,7 @@ int maria_reset(MARIA_HA *info) { int error= 0; MARIA_SHARE *share= info->s; + myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("maria_reset"); /* Free buffers and reset the following flags: @@ -553,13 +554,13 @@ int maria_reset(MARIA_HA *info) { info->rec_buff_size= 1; /* Force realloc */ _ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size, - share->base.default_rec_buff_size); + share->base.default_rec_buff_size, flag); } if (info->blob_buff_size > MARIA_SMALL_BLOB_BUFFER) { info->blob_buff_size= 1; /* Force realloc */ _ma_alloc_buffer(&info->blob_buff, &info->blob_buff_size, - MARIA_SMALL_BLOB_BUFFER); + MARIA_SMALL_BLOB_BUFFER, flag); } } #if defined(HAVE_MMAP) && defined(HAVE_MADVISE) diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c index 89848fae03e..06183c72895 100644 --- a/storage/maria/ma_open.c +++ b/storage/maria/ma_open.c @@ -39,7 +39,7 @@ static void maria_scan_end_dummy(MARIA_HA *info); static my_bool maria_once_init_dummy(MARIA_SHARE *, File); static my_bool maria_once_end_dummy(MARIA_SHARE *); static uchar *_ma_base_info_read(uchar *ptr, MARIA_BASE_INFO *base); -static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state); +static uchar *_ma_state_info_read(uchar *, MARIA_STATE_INFO *, myf); #define get_next_element(to,pos,size) { memcpy((char*) to,pos,(size_t) size); \ pos+=size;} @@ -98,6 +98,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, uint errpos; MARIA_HA info,*m_info; my_bitmap_map *changed_fields_bitmap; + myf flag= MY_WME | (share->temporary ? 
MY_THREAD_SPECIFIC : 0); DBUG_ENTER("maria_clone_internal"); errpos= 0; @@ -115,7 +116,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, errpos= 5; /* alloc and set up private structure parts */ - if (!my_multi_malloc(MY_WME, + if (!my_multi_malloc(flag, &m_info,sizeof(MARIA_HA), &info.blobs,sizeof(MARIA_BLOB)*share->base.blobs, &info.buff,(share->base.max_key_block_length*2+ @@ -163,10 +164,9 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, goto err; /* The following should be big enough for all pinning purposes */ - if (my_init_dynamic_array(&info.pinned_pages, - sizeof(MARIA_PINNED_PAGE), + if (my_init_dynamic_array(&info.pinned_pages, sizeof(MARIA_PINNED_PAGE), MY_MAX(share->base.blobs*2 + 4, - MARIA_MAX_TREE_LEVELS*3), 16, MYF(0))) + MARIA_MAX_TREE_LEVELS*3), 16, flag)) goto err; @@ -202,7 +202,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, /* Allocate buffer for one record */ /* prerequisites: info->rec_buffer == 0 && info->rec_buff_size == 0 */ if (_ma_alloc_buffer(&info.rec_buff, &info.rec_buff_size, - share->base.default_rec_buff_size)) + share->base.default_rec_buff_size, flag)) goto err; bzero(info.rec_buff, share->base.default_rec_buff_size); @@ -265,6 +265,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags) uint i,j,len,errpos,head_length,base_pos,keys, realpath_err, key_parts,base_key_parts,unique_key_parts,fulltext_keys,uniques; uint internal_table= MY_TEST(open_flags & HA_OPEN_INTERNAL_TABLE); + myf common_flag= open_flags & HA_OPEN_TMP_TABLE ? MY_THREAD_SPECIFIC : 0; uint file_version; size_t info_length; char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN], @@ -322,13 +323,13 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags) DEBUG_SYNC_C("mi_open_kfile"); if ((kfile=mysql_file_open(key_file_kfile, name_buff, (open_mode=O_RDWR) | O_SHARE | O_NOFOLLOW | O_CLOEXEC, - MYF(MY_NOSYMLINKS))) < 0) + MYF(common_flag | MY_NOSYMLINKS))) < 0) { if ((errno != EROFS && errno != EACCES) || mode != O_RDONLY || (kfile=mysql_file_open(key_file_kfile, name_buff, (open_mode=O_RDONLY) | O_SHARE | O_NOFOLLOW | O_CLOEXEC, - MYF(MY_NOSYMLINKS))) < 0) + MYF(common_flag | MY_NOSYMLINKS))) < 0) goto err; } share->mode=open_mode; @@ -393,7 +394,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags) Allocate space for header information and for data that is too big to keep on stack */ - if (!(disk_cache= my_malloc(info_length+128, MYF(MY_WME)))) + if (!(disk_cache= my_malloc(info_length+128, MYF(MY_WME | common_flag)))) { my_errno=ENOMEM; goto err; @@ -420,7 +421,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags) } share->state_diff_length=len-MARIA_STATE_INFO_SIZE; - if (!_ma_state_info_read(disk_cache, &share->state)) + if (!_ma_state_info_read(disk_cache, &share->state, common_flag)) goto err; len= mi_uint2korr(share->state.header.base_info_length); if (len != MARIA_BASE_INFO_SIZE) @@ -561,12 +562,10 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags) share->index_file_name.length= strlen(index_name); share->data_file_name.length= strlen(data_name); share->open_file_name.length= strlen(name); - if (!my_multi_malloc(MY_WME, + if (!my_multi_malloc(MYF(MY_WME | common_flag), &share,sizeof(*share), - &rec_per_key_part, - sizeof(double) * key_parts, - &nulls_per_key_part, - sizeof(long)* key_parts, + &rec_per_key_part, sizeof(double) * key_parts, + &nulls_per_key_part, sizeof(long)* key_parts, &share->keyinfo,keys*sizeof(MARIA_KEYDEF), 
&share->uniqueinfo,uniques*sizeof(MARIA_UNIQUEDEF), &share->keyparts, @@ -883,9 +882,9 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags) share->options|= HA_OPTION_READ_ONLY_DATA; share->is_log_table= FALSE; - if (open_flags & HA_OPEN_TMP_TABLE || - (share->options & HA_OPTION_TMP_TABLE)) + if (open_flags & HA_OPEN_TMP_TABLE || share->options & HA_OPTION_TMP_TABLE) { + common_flag|= MY_THREAD_SPECIFIC; share->options|= HA_OPTION_TMP_TABLE; share->temporary= share->delay_key_write= 1; share->write_flag=MYF(MY_NABP); @@ -1113,13 +1112,13 @@ err: */ my_bool _ma_alloc_buffer(uchar **old_addr, size_t *old_size, - size_t new_size) + size_t new_size, myf flag) { if (*old_size < new_size) { uchar *addr; if (!(addr= (uchar*) my_realloc(*old_addr, new_size, - MYF(MY_ALLOW_ZERO_PTR)))) + MYF(MY_ALLOW_ZERO_PTR | flag)))) return 1; *old_addr= addr; *old_size= new_size; @@ -1498,7 +1497,7 @@ uint _ma_state_info_write_sub(File file, MARIA_STATE_INFO *state, uint pWrite) } -static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state) +static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state, myf flag) { uint i,keys,key_parts; DBUG_ENTER("_ma_state_info_read"); @@ -1510,7 +1509,7 @@ static uchar *_ma_state_info_read(uchar *ptr, MARIA_STATE_INFO *state) /* Allocate memory for key parts if not already done */ if (!state->rec_per_key_part && - !my_multi_malloc(MY_WME, + !my_multi_malloc(MYF(MY_WME | flag), &state->rec_per_key_part, sizeof(*state->rec_per_key_part) * key_parts, &state->nulls_per_key_part, @@ -1955,6 +1954,8 @@ void _ma_set_index_pagecache_callbacks(PAGECACHE_FILE *file, int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share) { myf flags= MY_WME | (share->mode & O_NOFOLLOW ? MY_NOSYMLINKS : 0); + if (share->temporary) + flags|= MY_THREAD_SPECIFIC; DEBUG_SYNC_C("mi_open_datafile"); info->dfile.file= share->bitmap.file.file= mysql_file_open(key_file_dfile, share->data_file_name.str, diff --git a/storage/maria/ma_packrec.c b/storage/maria/ma_packrec.c index e42e9300d14..d1c30a57146 100644 --- a/storage/maria/ma_packrec.c +++ b/storage/maria/ma_packrec.c @@ -1413,10 +1413,12 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff, { uchar *header= info->header; uint head_length,UNINIT_VAR(ref_length); + MARIA_SHARE *share= maria->s; + myf flag= MY_WME | (share->temporary ? 
MY_THREAD_SPECIFIC : 0); if (file >= 0) { - ref_length=maria->s->pack.ref_length; + ref_length=share->pack.ref_length; /* We can't use my_pread() here because _ma_read_rnd_pack_record assumes position is ok @@ -1426,11 +1428,11 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff, return BLOCK_FATAL_ERROR; DBUG_DUMP("header", header, ref_length); } - head_length= read_pack_length((uint) maria->s->pack.version, header, + head_length= read_pack_length((uint) share->pack.version, header, &info->rec_len); - if (maria->s->base.blobs) + if (share->base.blobs) { - head_length+= read_pack_length((uint) maria->s->pack.version, + head_length+= read_pack_length((uint) share->pack.version, header + head_length, &info->blob_len); /* Ensure that the record buffer is big enough for the compressed @@ -1439,7 +1441,7 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff, */ if (_ma_alloc_buffer(rec_buff_p, rec_buff_size_p, info->rec_len + info->blob_len + - maria->s->base.extra_rec_buff_size)) + share->base.extra_rec_buff_size, flag)) return BLOCK_FATAL_ERROR; /* not enough memory */ bit_buff->blob_pos= *rec_buff_p + info->rec_len; bit_buff->blob_end= bit_buff->blob_pos + info->blob_len; @@ -1580,15 +1582,18 @@ _ma_mempack_get_block_info(MARIA_HA *maria, size_t *rec_buff_size_p, uchar *header) { - header+= read_pack_length((uint) maria->s->pack.version, header, + MARIA_SHARE *share= maria->s; + myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); + + header+= read_pack_length((uint) share->pack.version, header, &info->rec_len); - if (maria->s->base.blobs) + if (share->base.blobs) { - header+= read_pack_length((uint) maria->s->pack.version, header, + header+= read_pack_length((uint) share->pack.version, header, &info->blob_len); /* _ma_alloc_rec_buff sets my_errno on error */ if (_ma_alloc_buffer(rec_buff_p, rec_buff_size_p, - info->blob_len + maria->s->base.extra_rec_buff_size)) + info->blob_len + share->base.extra_rec_buff_size, flag)) return 0; /* not enough memory */ bit_buff->blob_pos= *rec_buff_p; bit_buff->blob_end= *rec_buff_p + info->blob_len; diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c index 8f108e3f03f..be8a9fe8b2a 100644 --- a/storage/maria/ma_recovery.c +++ b/storage/maria/ma_recovery.c @@ -1453,6 +1453,7 @@ static int new_table(uint16 sid, const char *name, LSN lsn_of_file_id) } if (maria_is_crashed(info)) { + tprint(tracef, "\n"); eprint(tracef, "Table '%s' is crashed, skipping it. 
Please repair it with" " aria_chk -r", share->open_file_name.str); recovery_found_crashed_tables++; diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h index c89623daea9..6c9649ede45 100644 --- a/storage/maria/maria_def.h +++ b/storage/maria/maria_def.h @@ -1198,7 +1198,7 @@ extern my_bool _ma_read_cache(MARIA_HA *, IO_CACHE *info, uchar *buff, uint re_read_if_possibly); extern ulonglong ma_retrieve_auto_increment(const uchar *key, uint8 key_type); extern my_bool _ma_alloc_buffer(uchar **old_addr, size_t *old_size, - size_t new_size); + size_t new_size, myf flag); extern size_t _ma_rec_unpack(MARIA_HA *info, uchar *to, uchar *from, size_t reclength); extern my_bool _ma_rec_check(MARIA_HA *info, const uchar *record, diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp index 8638399717e..fdca803ad96 100644 --- a/storage/mroonga/ha_mroonga.cpp +++ b/storage/mroonga/ha_mroonga.cpp @@ -550,9 +550,6 @@ static const char *mrn_inspect_extra_function(enum ha_extra_function operation) case HA_EXTRA_END_ALTER_COPY: inspected = "HA_EXTRA_END_ALTER_COPY"; break; - case HA_EXTRA_FAKE_START_STMT: - inspected = "HA_EXTRA_FAKE_START_STMT"; - break; #ifdef MRN_HAVE_HA_EXTRA_EXPORT case HA_EXTRA_EXPORT: inspected = "HA_EXTRA_EXPORT"; @@ -5919,7 +5916,7 @@ int ha_mroonga::wrapper_write_row_index(const uchar *buf) DBUG_RETURN(0); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -5994,7 +5991,7 @@ int ha_mroonga::storage_write_row(const uchar *buf) DBUG_RETURN(error); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); for (i = 0; i < n_columns; i++) { Field *field = table->field[i]; @@ -6275,7 +6272,7 @@ int ha_mroonga::storage_write_row_multiple_column_indexes(const uchar *buf, int error = 0; - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -6569,7 +6566,7 @@ int ha_mroonga::wrapper_update_row_index(const uchar *old_data, DBUG_RETURN(0); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -6690,7 +6687,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, grn_obj new_value; GRN_VOID_INIT(&new_value); { - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); generic_store_bulk(field, &new_value); } grn_obj casted_value; @@ -6719,7 +6716,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, storage_store_fields_for_prep_update(old_data, new_data, record_id); { mrn::Lock lock(&(share->record_mutex), have_unique_index()); - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); if ((error = storage_prepare_delete_row_unique_indexes(old_data, record_id))) { DBUG_RETURN(error); @@ -6744,7 +6741,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, #endif if (bitmap_is_set(table->write_set, field->field_index)) { - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess 
debug_column_access(table, &table->read_set); DBUG_PRINT("info", ("mroonga: update column %d(%d)",i,field->field_index)); if (field->is_null()) continue; @@ -6821,7 +6818,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, if (table->found_next_number_field && !table->s->next_number_keypart && new_data == table->record[0]) { - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); Field_num *field = (Field_num *) table->found_next_number_field; if (field->unsigned_flag || field->val_int() > 0) { MRN_LONG_TERM_SHARE *long_term_share = share->long_term_share; @@ -6878,7 +6875,7 @@ int ha_mroonga::storage_update_row_index(const uchar *old_data, my_ptrdiff_t ptr_diff = PTR_BYTE_DIFF(old_data, table->record[0]); - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; mrn_change_encoding(ctx, NULL); @@ -7094,7 +7091,7 @@ int ha_mroonga::wrapper_delete_row_index(const uchar *buf) DBUG_RETURN(0); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -7245,7 +7242,7 @@ int ha_mroonga::storage_delete_row_index(const uchar *buf) GRN_TEXT_INIT(&key, 0); GRN_TEXT_INIT(&encoded_key, 0); - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; mrn_change_encoding(ctx, NULL); @@ -11436,7 +11433,7 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id) } } - mrn::DebugColumnAccess debug_column_access(table, table->write_set); + mrn::DebugColumnAccess debug_column_access(table, &table->write_set); DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index)); field->move_field_offset(ptr_diff); if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { @@ -11501,7 +11498,7 @@ void ha_mroonga::storage_store_fields_for_prep_update(const uchar *old_data, ) #endif ) { - mrn::DebugColumnAccess debug_column_access(table, table->write_set); + mrn::DebugColumnAccess debug_column_access(table, &table->write_set); DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index)); grn_obj value; GRN_OBJ_INIT(&value, GRN_BULK, 0, grn_obj_get_range(ctx, grn_columns[i])); @@ -11537,7 +11534,7 @@ void ha_mroonga::storage_store_fields_by_index(uchar *buf) if (KEY_N_KEY_PARTS(key_info) == 1) { my_ptrdiff_t ptr_diff = PTR_BYTE_DIFF(buf, table->record[0]); Field *field = key_info->key_part->field; - mrn::DebugColumnAccess debug_column_access(table, table->write_set); + mrn::DebugColumnAccess debug_column_access(table, &table->write_set); field->move_field_offset(ptr_diff); storage_store_field(field, (const char *)key, key_length); field->move_field_offset(-ptr_diff); diff --git a/storage/mroonga/lib/mrn_debug_column_access.cpp b/storage/mroonga/lib/mrn_debug_column_access.cpp index 778300a33d6..cb2ce7e35ca 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.cpp +++ b/storage/mroonga/lib/mrn_debug_column_access.cpp @@ -20,7 +20,7 @@ #include "mrn_debug_column_access.hpp" namespace mrn { - DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap) + DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap) : table_(table), bitmap_(bitmap) { #ifdef DBUG_ASSERT_EXISTS diff --git 
a/storage/mroonga/lib/mrn_debug_column_access.hpp b/storage/mroonga/lib/mrn_debug_column_access.hpp index 7c2fd60344e..954e04135f8 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.hpp +++ b/storage/mroonga/lib/mrn_debug_column_access.hpp @@ -25,12 +25,12 @@ namespace mrn { class DebugColumnAccess { TABLE *table_; - MY_BITMAP *bitmap_; + MY_BITMAP **bitmap_; #ifdef DBUG_ASSERT_EXISTS - my_bitmap_map *map_; + MY_BITMAP *map_; #endif public: - DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap); + DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap); ~DebugColumnAccess(); }; } diff --git a/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp b/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp index 5536eecb255..73639685d0e 100644 --- a/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp +++ b/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp @@ -675,7 +675,8 @@ namespace mrn { &normalized, &normalized_length, NULL); uint16 new_blob_data_length; if (normalized_length <= UINT_MAX16) { - memcpy(grn_key, normalized, normalized_length); + if (normalized_length) + memcpy(grn_key, normalized, normalized_length); if (normalized_length < *mysql_key_size) { memset(grn_key + normalized_length, '\0', *mysql_key_size - normalized_length); diff --git a/storage/mroonga/vendor/groonga/lib/alloc.c b/storage/mroonga/vendor/groonga/lib/alloc.c index 2e28431595a..5e556b83712 100644 --- a/storage/mroonga/vendor/groonga/lib/alloc.c +++ b/storage/mroonga/vendor/groonga/lib/alloc.c @@ -310,13 +310,13 @@ grn_alloc_info_free(grn_ctx *ctx) } #endif /* USE_MEMORY_DEBUG */ -#define GRN_CTX_SEGMENT_SIZE (1<<22) +#define GRN_CTX_SEGMENT_SIZE (1U <<22) #define GRN_CTX_SEGMENT_MASK (GRN_CTX_SEGMENT_SIZE - 1) -#define GRN_CTX_SEGMENT_WORD (1<<31) -#define GRN_CTX_SEGMENT_VLEN (1<<30) -#define GRN_CTX_SEGMENT_LIFO (1<<29) -#define GRN_CTX_SEGMENT_DIRTY (1<<28) +#define GRN_CTX_SEGMENT_WORD (1U <<31) +#define GRN_CTX_SEGMENT_VLEN (1U <<30) +#define GRN_CTX_SEGMENT_LIFO (1U <<29) +#define GRN_CTX_SEGMENT_DIRTY (1U <<28) void grn_alloc_init_ctx_impl(grn_ctx *ctx) @@ -400,8 +400,8 @@ grn_ctx_alloc(grn_ctx *ctx, size_t size, int flags, header[0] = i; header[1] = (int32_t) size; } else { - i = ctx->impl->currseg; - mi = &ctx->impl->segs[i]; + if ((i = ctx->impl->currseg) >= 0) + mi = &ctx->impl->segs[i]; if (i < 0 || size + mi->nref > GRN_CTX_SEGMENT_SIZE) { for (i = 0, mi = ctx->impl->segs;; i++, mi++) { if (i >= GRN_CTX_N_SEGMENTS) { diff --git a/storage/mroonga/vendor/groonga/lib/db.c b/storage/mroonga/vendor/groonga/lib/db.c index f3769f9aa4c..418335aaf00 100644 --- a/storage/mroonga/vendor/groonga/lib/db.c +++ b/storage/mroonga/vendor/groonga/lib/db.c @@ -12494,7 +12494,7 @@ grn_db_init_builtin_types(grn_ctx *ctx) GRN_OBJ_KEY_VAR_SIZE, 1 << 16); if (!obj || DB_OBJ(obj)->id != GRN_DB_TEXT) { return GRN_FILE_CORRUPT; } obj = deftype(ctx, "LongText", - GRN_OBJ_KEY_VAR_SIZE, 1 << 31); + GRN_OBJ_KEY_VAR_SIZE, 1U << 31); if (!obj || DB_OBJ(obj)->id != GRN_DB_LONG_TEXT) { return GRN_FILE_CORRUPT; } obj = deftype(ctx, "TokyoGeoPoint", GRN_OBJ_KEY_GEO_POINT, sizeof(grn_geo_point)); diff --git a/storage/mroonga/vendor/groonga/lib/pat.c b/storage/mroonga/vendor/groonga/lib/pat.c index 642173e2fdc..01f6108fbd0 100644 --- a/storage/mroonga/vendor/groonga/lib/pat.c +++ b/storage/mroonga/vendor/groonga/lib/pat.c @@ -899,7 +899,7 @@ chop(grn_ctx *ctx, grn_pat *pat, const char **key, const char *end, uint32_t *lk case GRN_OBJ_KEY_FLOAT :\ if ((size) == sizeof(int64_t)) {\ int64_t v = *(int64_t *)(key);\ - v ^= ((v >> 63)|(1LL 
<< 63));\ + v ^= ((v >> 63)|(1ULL << 63));\ grn_hton((keybuf), &v, (size));\ }\ break;\ @@ -924,7 +924,7 @@ chop(grn_ctx *ctx, grn_pat *pat, const char **key, const char *end, uint32_t *lk if ((size) == sizeof(int64_t)) {\ int64_t v;\ grn_hton(&v, (key), (size));\ - *((int64_t *)(keybuf)) = v ^ (((v^(1LL<<63))>> 63)|(1LL<<63)); \ + *((int64_t *)(keybuf)) = v ^ ((((int64_t)(v^(1ULL<<63)))>> 63)|(1ULL<<63)); \ }\ break;\ }\ diff --git a/storage/mroonga/vendor/groonga/lib/proc/proc_select.c b/storage/mroonga/vendor/groonga/lib/proc/proc_select.c index 605fd42239f..1f2a5005401 100644 --- a/storage/mroonga/vendor/groonga/lib/proc/proc_select.c +++ b/storage/mroonga/vendor/groonga/lib/proc/proc_select.c @@ -2989,7 +2989,8 @@ grn_select(grn_ctx *ctx, grn_select_data *data) char *cp = cache_key; #define PUT_CACHE_KEY(string) \ - grn_memcpy(cp, (string).value, (string).length); \ + if ((string).value) \ + grn_memcpy(cp, (string).value, (string).length); \ cp += (string).length; \ *cp++ = '\0' diff --git a/storage/mroonga/vendor/groonga/lib/str.c b/storage/mroonga/vendor/groonga/lib/str.c index 6b2d17769ca..4f0a3a98699 100644 --- a/storage/mroonga/vendor/groonga/lib/str.c +++ b/storage/mroonga/vendor/groonga/lib/str.c @@ -46,7 +46,7 @@ grn_str_charlen_utf8(grn_ctx *ctx, const unsigned char *str, const unsigned char if (*str & 0x80) { int i; int len; - GRN_BIT_SCAN_REV(~(*str << 24), len); + GRN_BIT_SCAN_REV(~(((uint) *str) << 24), len); len = 31 - len; if ((unsigned int)(len - 2) >= 3) { /* (len == 1 || len >= 5) */ GRN_LOG(ctx, GRN_LOG_WARNING, @@ -1963,7 +1963,8 @@ grn_bulk_write(grn_ctx *ctx, grn_obj *buf, const char *str, unsigned int len) if ((rc = grn_bulk_resize(ctx, buf, GRN_BULK_VSIZE(buf) + len))) { return rc; } } curr = GRN_BULK_CURR(buf); - grn_memcpy(curr, str, len); + if (str) + grn_memcpy(curr, str, len); GRN_BULK_INCR_LEN(buf, len); return rc; } diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index ba6744ae815..8a0ca759871 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -1952,7 +1952,7 @@ static void make_traverse_code_tree(HUFF_TREE *huff_tree, { chr=element->a.leaf.element_nr; huff_tree->code_len[chr]= (uchar) (8 * sizeof(ulonglong) - size); - huff_tree->code[chr]= (code >> size); + huff_tree->code[chr]= (size == 8 * sizeof(ulonglong)) ? 
0 : (code >> size); if (huff_tree->height < 8 * sizeof(ulonglong) - size) huff_tree->height= 8 * sizeof(ulonglong) - size; } @@ -2943,12 +2943,15 @@ static void flush_bits(void) ulonglong bit_buffer; bits= file_buffer.bits & ~7; - bit_buffer= file_buffer.bitbucket >> bits; - bits= BITS_SAVED - bits; - while (bits > 0) + if (bits != BITS_SAVED) { - bits-= 8; - *file_buffer.pos++= (uchar) (bit_buffer >> bits); + bit_buffer= file_buffer.bitbucket >> bits; + bits= BITS_SAVED - bits; + while (bits > 0) + { + bits-= 8; + *file_buffer.pos++= (uchar) (bit_buffer >> bits); + } } if (file_buffer.pos >= file_buffer.end) (void) flush_buffer(~ (ulong) 0); diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc index fd715c57a1f..e0e81f7cddc 100644 --- a/storage/oqgraph/ha_oqgraph.cc +++ b/storage/oqgraph/ha_oqgraph.cc @@ -908,7 +908,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key, bmove_align(buf, table->s->default_values, table->s->reclength); key_restore(buf, (byte*) key, key_info, key_len); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); my_ptrdiff_t ptrdiff= buf - table->record[0]; if (ptrdiff) @@ -937,7 +937,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key, field[1]->move_field_offset(-ptrdiff); field[2]->move_field_offset(-ptrdiff); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); return error_code(oqgraph::NO_MORE_DATA); } } @@ -962,7 +962,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key, field[1]->move_field_offset(-ptrdiff); field[2]->move_field_offset(-ptrdiff); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); // Keep the latch around so we can use it in the query result later - // See fill_record(). 
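The `tmp_use_all_columns()` / `tmp_restore_column_map()` edits (and the matching `dbug_` pairs in ha_oqgraph above, and in perfschema, rocksdb, sequence, sphinx, spider and tokudb below) all follow from a single interface change: the helpers now take the address of the table's read or write set and hand back the previous MY_BITMAP pointer, instead of operating on a raw my_bitmap_map buffer. A minimal sketch of the new contract, with stand-in types so it compiles outside the server tree; the function bodies here are an assumption drawn from the call sites, not the server's exact code:

// Stand-ins for the real server definitions; sketch only.
struct MY_BITMAP { };
struct TABLE_SHARE { MY_BITMAP all_set; };
struct TABLE { TABLE_SHARE *s; MY_BITMAP *read_set; MY_BITMAP *write_set; };

// New contract: swap the table's column-set pointer, return the old one.
static inline MY_BITMAP *tmp_use_all_columns(TABLE *table, MY_BITMAP **bitmap)
{
  MY_BITMAP *old= *bitmap;      // remember the caller's current set
  *bitmap= &table->s->all_set;  // temporarily treat every column as used
  return old;
}

static inline void tmp_restore_column_map(MY_BITMAP **bitmap, MY_BITMAP *old)
{
  *bitmap= old;                 // put the original set back
}

int main()
{
  TABLE_SHARE share;
  MY_BITMAP current;
  TABLE table= { &share, &current, &current };
  // Usage mirrors the patched engines, e.g. ha_oqgraph::index_read_idx():
  MY_BITMAP *org_bitmap= tmp_use_all_columns(&table, &table.read_set);
  /* ... access fields regardless of the statement's column set ... */
  tmp_restore_column_map(&table.read_set, org_bitmap);
  return 0;
}

Swapping the whole bitmap pointer keeps the override O(1) and lets the compiler type-check the restore call, which is presumably why every caller now passes `&table->read_set` rather than `table->read_set`.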
@@ -995,7 +995,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row) bmove_align(record, table->s->default_values, table->s->reclength); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); my_ptrdiff_t ptrdiff= record - table->record[0]; if (ptrdiff) @@ -1071,7 +1071,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row) field[4]->move_field_offset(-ptrdiff); field[5]->move_field_offset(-ptrdiff); } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return 0; } diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc index 144ebcddff4..acab0e73a3d 100644 --- a/storage/perfschema/pfs_engine_table.cc +++ b/storage/perfschema/pfs_engine_table.cc @@ -188,17 +188,15 @@ ha_rows PFS_engine_table_share::get_row_count(void) const int PFS_engine_table_share::write_row(TABLE *table, const unsigned char *buf, Field **fields) const { - my_bitmap_map *org_bitmap; - if (m_write_row == NULL) { return HA_ERR_WRONG_COMMAND; } /* We internally read from Fields to support the write interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); int result= m_write_row(table, buf, fields); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return result; } @@ -256,7 +254,6 @@ int PFS_engine_table::read_row(TABLE *table, unsigned char *buf, Field **fields) { - my_bitmap_map *org_bitmap; Field *f; Field **fields_reset; @@ -264,7 +261,7 @@ int PFS_engine_table::read_row(TABLE *table, bool read_all= !bitmap_is_clear_all(table->write_set); /* We internally write to Fields to support the read interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set); /* Some callers of the storage engine interface do not honor the @@ -276,7 +273,7 @@ int PFS_engine_table::read_row(TABLE *table, f->reset(); int result= read_row_values(table, buf, fields, read_all); - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); return result; } @@ -294,12 +291,10 @@ int PFS_engine_table::update_row(TABLE *table, const unsigned char *new_buf, Field **fields) { - my_bitmap_map *org_bitmap; - /* We internally read from Fields to support the write interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); int result= update_row_values(table, old_buf, new_buf, fields); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return result; } @@ -308,12 +303,10 @@ int PFS_engine_table::delete_row(TABLE *table, const unsigned char *buf, Field **fields) { - my_bitmap_map *org_bitmap; - /* We internally read from Fields to support the delete interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); int result= delete_row_values(table, buf, fields); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return result; } @@ -1240,11 +1233,11 @@ bool pfs_show_status(handlerton *hton, THD 
*thd, break; case 141: name= "(filename_hash).count"; - size= filename_hash.count; + size= pfs_filename_hash.count; break; case 142: name= "(filename_hash).size"; - size= filename_hash.size; + size= pfs_filename_hash.size; break; case 143: name= "(host_hash).count"; diff --git a/storage/perfschema/pfs_instr.cc b/storage/perfschema/pfs_instr.cc index ca9e0385021..fd8da77fe40 100644 --- a/storage/perfschema/pfs_instr.cc +++ b/storage/perfschema/pfs_instr.cc @@ -143,7 +143,7 @@ PFS_thread *thread_array= NULL; File instrumentation instances array. @sa file_max @sa file_lost - @sa filename_hash + @sa pfs_filename_hash */ PFS_file *file_array= NULL; @@ -189,8 +189,8 @@ static unsigned char *history_stmts_digest_token_array= NULL; static char *thread_session_connect_attrs_array= NULL; /** Hash table for instrumented files. */ -LF_HASH filename_hash; -/** True if filename_hash is initialized. */ +LF_HASH pfs_filename_hash; +/** True if pfs_filename_hash is initialized. */ static bool filename_hash_inited= false; /** @@ -586,7 +586,7 @@ int init_file_hash(void) { if ((! filename_hash_inited) && (file_max > 0)) { - lf_hash_init(&filename_hash, sizeof(PFS_file*), LF_HASH_UNIQUE, + lf_hash_init(&pfs_filename_hash, sizeof(PFS_file*), LF_HASH_UNIQUE, 0, 0, filename_hash_get_key, &my_charset_bin); /* filename_hash.size= file_max; */ filename_hash_inited= true; @@ -599,7 +599,7 @@ void cleanup_file_hash(void) { if (filename_hash_inited) { - lf_hash_destroy(&filename_hash); + lf_hash_destroy(&pfs_filename_hash); filename_hash_inited= false; } } @@ -1186,7 +1186,7 @@ void destroy_thread(PFS_thread *pfs) } /** - Get the hash pins for @filename_hash. + Get the hash pins for @pfs_filename_hash. @param thread The running thread. @returns The LF_HASH pins for the thread. */ @@ -1196,7 +1196,7 @@ LF_PINS* get_filename_hash_pins(PFS_thread *thread) { if (! filename_hash_inited) return NULL; - thread->m_filename_hash_pins= lf_hash_get_pins(&filename_hash); + thread->m_filename_hash_pins= lf_hash_get_pins(&pfs_filename_hash); } return thread->m_filename_hash_pins; } @@ -1314,7 +1314,7 @@ find_or_create_file(PFS_thread *thread, PFS_file_class *klass, search: entry= reinterpret_cast<PFS_file**> - (lf_hash_search(&filename_hash, pins, + (lf_hash_search(&pfs_filename_hash, pins, normalized_filename, normalized_length)); if (entry && (entry != MY_ERRPTR)) { @@ -1359,7 +1359,7 @@ search: pfs->m_identity= (const void *)pfs; int res; - res= lf_hash_insert(&filename_hash, thread->m_filename_hash_pins, + res= lf_hash_insert(&pfs_filename_hash, thread->m_filename_hash_pins, &pfs); if (likely(res == 0)) { @@ -1426,7 +1426,7 @@ void destroy_file(PFS_thread *thread, PFS_file *pfs) LF_PINS *pins= get_filename_hash_pins(thread); DBUG_ASSERT(pins != NULL); - lf_hash_delete(&filename_hash, pins, + lf_hash_delete(&pfs_filename_hash, pins, pfs->m_filename, pfs->m_filename_length); if (klass->is_singleton()) klass->m_singleton= NULL; diff --git a/storage/perfschema/pfs_instr.h b/storage/perfschema/pfs_instr.h index 81bc52d1d75..a5ff3b4a17d 100644 --- a/storage/perfschema/pfs_instr.h +++ b/storage/perfschema/pfs_instr.h @@ -698,7 +698,7 @@ void update_socket_derived_flags(); /** Update derived flags for all instruments. 
*/ void update_instruments_derived_flags(); -extern LF_HASH filename_hash; +extern LF_HASH pfs_filename_hash; /** @} */ #endif diff --git a/storage/perfschema/table_accounts.cc b/storage/perfschema/table_accounts.cc index 708f8269a69..550f6614abb 100644 --- a/storage/perfschema/table_accounts.cc +++ b/storage/perfschema/table_accounts.cc @@ -43,8 +43,8 @@ table_accounts::m_share= sizeof(PFS_simple_index), /* ref length */ &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE accounts(" - "USER CHAR(16) collate utf8_bin default null," - "HOST CHAR(60) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "CURRENT_CONNECTIONS bigint not null," "TOTAL_CONNECTIONS bigint not null)") } }; diff --git a/storage/perfschema/table_esgs_by_account_by_event_name.cc b/storage/perfschema/table_esgs_by_account_by_event_name.cc index 22e4e0040f1..9a983eb076e 100644 --- a/storage/perfschema/table_esgs_by_account_by_event_name.cc +++ b/storage/perfschema/table_esgs_by_account_by_event_name.cc @@ -49,8 +49,8 @@ table_esgs_by_account_by_event_name::m_share= sizeof(pos_esgs_by_account_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_account_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," - "HOST CHAR(60) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esgs_by_host_by_event_name.cc b/storage/perfschema/table_esgs_by_host_by_event_name.cc index 86cc2eb1b86..5ff9faf0c1e 100644 --- a/storage/perfschema/table_esgs_by_host_by_event_name.cc +++ b/storage/perfschema/table_esgs_by_host_by_event_name.cc @@ -50,7 +50,7 @@ table_esgs_by_host_by_event_name::m_share= sizeof(pos_esgs_by_host_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_host_by_event_name(" - "HOST CHAR(60) collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esgs_by_user_by_event_name.cc b/storage/perfschema/table_esgs_by_user_by_event_name.cc index af73c1fc5fd..23b7b0f6689 100644 --- a/storage/perfschema/table_esgs_by_user_by_event_name.cc +++ b/storage/perfschema/table_esgs_by_user_by_event_name.cc @@ -50,7 +50,7 @@ table_esgs_by_user_by_event_name::m_share= sizeof(pos_esgs_by_user_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_user_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esms_by_account_by_event_name.cc b/storage/perfschema/table_esms_by_account_by_event_name.cc index 7afdabcbbfe..312050aa9c9 100644 --- a/storage/perfschema/table_esms_by_account_by_event_name.cc +++ b/storage/perfschema/table_esms_by_account_by_event_name.cc @@ -49,8 +49,8 @@ table_esms_by_account_by_event_name::m_share= 
sizeof(pos_esms_by_account_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_account_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," - "HOST CHAR(60) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esms_by_host_by_event_name.cc b/storage/perfschema/table_esms_by_host_by_event_name.cc index 42629ab6c09..b390d1e17a4 100644 --- a/storage/perfschema/table_esms_by_host_by_event_name.cc +++ b/storage/perfschema/table_esms_by_host_by_event_name.cc @@ -50,7 +50,7 @@ table_esms_by_host_by_event_name::m_share= sizeof(pos_esms_by_host_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_host_by_event_name(" - "HOST CHAR(60) collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esms_by_user_by_event_name.cc b/storage/perfschema/table_esms_by_user_by_event_name.cc index f8708ac9a14..1fa1289aa8c 100644 --- a/storage/perfschema/table_esms_by_user_by_event_name.cc +++ b/storage/perfschema/table_esms_by_user_by_event_name.cc @@ -50,7 +50,7 @@ table_esms_by_user_by_event_name::m_share= sizeof(pos_esms_by_user_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_user_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_ews_by_account_by_event_name.cc b/storage/perfschema/table_ews_by_account_by_event_name.cc index fa6258ec9ac..40e0152f889 100644 --- a/storage/perfschema/table_ews_by_account_by_event_name.cc +++ b/storage/perfschema/table_ews_by_account_by_event_name.cc @@ -49,8 +49,8 @@ table_ews_by_account_by_event_name::m_share= sizeof(pos_ews_by_account_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_account_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," - "HOST CHAR(60) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_ews_by_host_by_event_name.cc b/storage/perfschema/table_ews_by_host_by_event_name.cc index e3ef7ca3720..d22d6fc8d79 100644 --- a/storage/perfschema/table_ews_by_host_by_event_name.cc +++ b/storage/perfschema/table_ews_by_host_by_event_name.cc @@ -50,7 +50,7 @@ table_ews_by_host_by_event_name::m_share= sizeof(pos_ews_by_host_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_host_by_event_name(" - "HOST CHAR(60) collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not 
null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_ews_by_user_by_event_name.cc b/storage/perfschema/table_ews_by_user_by_event_name.cc index cb99f749a9c..b2f8e1da824 100644 --- a/storage/perfschema/table_ews_by_user_by_event_name.cc +++ b/storage/perfschema/table_ews_by_user_by_event_name.cc @@ -50,7 +50,7 @@ table_ews_by_user_by_event_name::m_share= sizeof(pos_ews_by_user_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_user_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_hosts.cc b/storage/perfschema/table_hosts.cc index 8bc5310817c..221e0664590 100644 --- a/storage/perfschema/table_hosts.cc +++ b/storage/perfschema/table_hosts.cc @@ -44,7 +44,7 @@ table_hosts::m_share= sizeof(PFS_simple_index), /* ref length */ &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE hosts(" - "HOST CHAR(60) collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "CURRENT_CONNECTIONS bigint not null," "TOTAL_CONNECTIONS bigint not null)") } }; diff --git a/storage/perfschema/table_setup_actors.cc b/storage/perfschema/table_setup_actors.cc index b05f6ad004b..f18d6ceee20 100644 --- a/storage/perfschema/table_setup_actors.cc +++ b/storage/perfschema/table_setup_actors.cc @@ -49,9 +49,9 @@ table_setup_actors::m_share= sizeof(PFS_simple_index), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE setup_actors(" - "HOST CHAR(60) collate utf8_bin default '%' not null," - "USER CHAR(16) collate utf8_bin default '%' not null," - "ROLE CHAR(16) collate utf8_bin default '%' not null)") } + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default '%' not null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default '%' not null," + "ROLE CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default '%' not null)") } }; PFS_engine_table* table_setup_actors::create() diff --git a/storage/perfschema/table_threads.cc b/storage/perfschema/table_threads.cc index b396db1a814..59b0af453fb 100644 --- a/storage/perfschema/table_threads.cc +++ b/storage/perfschema/table_threads.cc @@ -46,8 +46,8 @@ table_threads::m_share= "NAME VARCHAR(128) not null," "TYPE VARCHAR(10) not null," "PROCESSLIST_ID BIGINT unsigned," - "PROCESSLIST_USER VARCHAR(16)," - "PROCESSLIST_HOST VARCHAR(60)," + "PROCESSLIST_USER VARCHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ")," + "PROCESSLIST_HOST VARCHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ")," "PROCESSLIST_DB VARCHAR(64)," "PROCESSLIST_COMMAND VARCHAR(16)," "PROCESSLIST_TIME BIGINT," diff --git a/storage/perfschema/table_users.cc b/storage/perfschema/table_users.cc index 883ebd36633..e9592c55f55 100644 --- a/storage/perfschema/table_users.cc +++ b/storage/perfschema/table_users.cc @@ -44,7 +44,7 @@ table_users::m_share= sizeof(PFS_simple_index), /* ref length */ &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE users(" - "USER CHAR(16) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," "CURRENT_CONNECTIONS bigint not null," "TOTAL_CONNECTIONS bigint not null)") } }; diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 52c93c37806..b4fcb097b71 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ 
b/storage/rocksdb/ha_rocksdb.cc @@ -6116,8 +6116,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() { Field *field = table->key_info[table->s->next_number_index].key_part[0].field; ulonglong max_val = rdb_get_int_col_max_value(field); - my_bitmap_map *const old_map = - dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *const old_map = dbug_tmp_use_all_columns(table, &table->read_set); last_val = field->val_int(); if (last_val != max_val) { last_val++; @@ -6132,7 +6131,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() { } } #endif - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); } m_keyread_only = save_keyread_only; @@ -6169,15 +6168,15 @@ void ha_rocksdb::update_auto_incr_val_from_field() { field = table->key_info[table->s->next_number_index].key_part[0].field; max_val = rdb_get_int_col_max_value(field); - my_bitmap_map *const old_map = - dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *const old_map = + dbug_tmp_use_all_columns(table, &table->read_set); new_val = field->val_int(); // don't increment if we would wrap around if (new_val != max_val) { new_val++; } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); // Only update if positive value was set for auto_incr column. if (new_val <= max_val) { diff --git a/storage/rocksdb/mysql-test/rocksdb/r/issue896.result b/storage/rocksdb/mysql-test/rocksdb/r/issue896.result index 917c95733f7..6b742ebaf0c 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/issue896.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/issue896.result @@ -9,7 +9,7 @@ KEY `d` (`d`) INSERT INTO t1 VALUES (100, 'aaabbb', UNIX_TIMESTAMP(), 200); EXPLAIN SELECT COUNT(*) FROM t1 FORCE INDEX(d); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index NULL d 11 NULL # Using index +1 SIMPLE t1 index NULL d 9 NULL # Using index # segfault here without the fix SELECT COUNT(*) FROM t1 FORCE INDEX(d); COUNT(*) diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index f2f9adebf46..daca7f7f78d 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -546,7 +546,7 @@ pk key1 col1 explain select key1 from t30; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t30 index NULL key1 20 NULL # Using index +1 SIMPLE t30 index NULL key1 18 NULL # Using index select key1 from t30; key1 row1-key @@ -618,7 +618,7 @@ row3 row3-key row3-data explain select * from t30 order by key1 limit 3; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t30 index NULL key1 20 NULL # +1 SIMPLE t30 index NULL key1 18 NULL # select * from t30 order by key1 limit 3; pk key1 col1 row1 row1-key row1-data @@ -627,7 +627,7 @@ row3 row3-key row3-data explain select * from t30 order by key1 desc limit 3; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t30 index NULL key1 20 NULL # +1 SIMPLE t30 index NULL key1 18 NULL # select * from t30 order by key1 desc limit 3; pk key1 col1 row5 row5-key row5-data diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index f0bc8a49761..99f1178d897 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -1489,12 +1489,12 @@ void Rdb_key_def::pack_with_make_sort_key( DBUG_ASSERT(*dst != nullptr); const int max_len = 
fpi->m_max_image_len; - my_bitmap_map *old_map; + MY_BITMAP*old_map; old_map= dbug_tmp_use_all_columns(field->table, - field->table->read_set); + &field->table->read_set); field->sort_string(*dst, max_len); - dbug_tmp_restore_column_map(field->table->read_set, old_map); + dbug_tmp_restore_column_map(&field->table->read_set, old_map); *dst += max_len; } diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc index b9f5d02bd51..8eae98955c3 100644 --- a/storage/sequence/sequence.cc +++ b/storage/sequence/sequence.cc @@ -115,13 +115,13 @@ THR_LOCK_DATA **ha_seq::store_lock(THD *thd, THR_LOCK_DATA **to, void ha_seq::set(unsigned char *buf) { - my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set); my_ptrdiff_t offset = (my_ptrdiff_t) (buf - table->record[0]); Field *field = table->field[0]; field->move_field_offset(offset); field->store(cur, true); field->move_field_offset(-offset); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); } int ha_seq::rnd_init(bool scan) diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index 75558d333e0..f2bc24c47d4 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -3052,7 +3052,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint ) } #if MYSQL_VERSION_ID>50100 - my_bitmap_map * org_bitmap = dbug_tmp_use_all_columns ( table, table->write_set ); + MY_BITMAP * org_bitmap = dbug_tmp_use_all_columns ( table, &table->write_set ); #endif Field ** field = table->field; @@ -3198,7 +3198,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint ) m_iCurrentPos++; #if MYSQL_VERSION_ID > 50100 - dbug_tmp_restore_column_map ( table->write_set, org_bitmap ); + dbug_tmp_restore_column_map ( &table->write_set, org_bitmap ); #endif SPH_RET(0); diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc index b2600859b88..988c2305b04 100644 --- a/storage/spider/ha_spider.cc +++ b/storage/spider/ha_spider.cc @@ -10112,12 +10112,12 @@ int ha_spider::write_row( if (!table->auto_increment_field_not_null) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif table->next_number_field->store((longlong) 0, TRUE); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif force_auto_increment = FALSE; table->file->insert_id_for_cur_row = 0; @@ -10125,13 +10125,13 @@ int ha_spider::write_row( } else if (auto_increment_mode == 2) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif table->next_number_field->store((longlong) 0, TRUE); table->auto_increment_field_not_null = FALSE; #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif force_auto_increment = FALSE; table->file->insert_id_for_cur_row = 0; diff --git a/storage/spider/mysql-test/spider/r/basic_sql.result b/storage/spider/mysql-test/spider/r/basic_sql.result index 94a09fc317b..ba904b5f577 100644 --- a/storage/spider/mysql-test/spider/r/basic_sql.result +++ b/storage/spider/mysql-test/spider/r/basic_sql.result @@ -717,6 +717,10 @@ TRUNCATE TABLE ta_l; connection 
master_1; SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a; a b date_format(c, '%Y-%m-%d %H:%i:%s') +connection master_1; +create table t2345678911234567892123456789312345678941234567895123234234(id int) ENGINE=SPIDER +COMMENT='host "192.168.21.1", user "spider", password "password", database "test32738123123123"'; +drop table t2345678911234567892123456789312345678941234567895123234234; deinit connection master_1; diff --git a/storage/spider/mysql-test/spider/t/basic_sql.test b/storage/spider/mysql-test/spider/t/basic_sql.test index 5bb040047fc..a3184a14beb 100644 --- a/storage/spider/mysql-test/spider/t/basic_sql.test +++ b/storage/spider/mysql-test/spider/t/basic_sql.test @@ -2677,6 +2677,11 @@ if ($USE_CHILD_GROUP2) --connection master_1 SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a; +--connection master_1 +create table t2345678911234567892123456789312345678941234567895123234234(id int) ENGINE=SPIDER + COMMENT='host "192.168.21.1", user "spider", password "password", database "test32738123123123"'; +drop table t2345678911234567892123456789312345678941234567895123234234; + --echo --echo deinit --disable_warnings @@ -2689,6 +2694,7 @@ if ($USE_CHILD_GROUP2) --connection child2_2 DROP DATABASE IF EXISTS auto_test_remote2; } + --disable_query_log --disable_result_log --source test_deinit.inc diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index 6d2afc1fd55..264f85d74cb 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -1733,7 +1733,7 @@ int spider_db_append_key_where_internal( DBUG_PRINT("info", ("spider end_key_part_map=%lu", end_key_part_map)); #ifndef DBUG_OFF - my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set); #endif if (sql_kind == SPIDER_SQL_KIND_HANDLER) @@ -2663,7 +2663,7 @@ end: if (sql_kind == SPIDER_SQL_KIND_SQL) dbton_hdl->set_order_pos(sql_type); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif DBUG_RETURN(0); } @@ -3200,8 +3200,8 @@ int spider_db_fetch_table( bitmap_is_set(table->write_set, (*field)->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(*field))); @@ -3209,7 +3209,7 @@ int spider_db_fetch_table( spider_db_fetch_row(share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } else { DBUG_PRINT("info", ("spider bitmap is not set %s", @@ -3380,8 +3380,8 @@ int spider_db_fetch_key( bitmap_is_set(table->write_set, field->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(field))); @@ -3389,7 +3389,7 @@ int spider_db_fetch_key( spider_db_fetch_row(share, field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -3504,15 +3504,15 @@ int spider_db_fetch_minimum_columns( 
bitmap_is_set(table->write_set, (*field)->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(*field))); if ((error_num = spider_db_fetch_row(share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -5670,8 +5670,8 @@ int spider_db_seek_tmp_table( bitmap_is_set(table->write_set, (*field)->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(*field))); @@ -5679,7 +5679,7 @@ int spider_db_seek_tmp_table( spider_db_fetch_row(spider->share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -5758,8 +5758,8 @@ int spider_db_seek_tmp_key( bitmap_is_set(table->write_set, field->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(field))); @@ -5767,7 +5767,7 @@ int spider_db_seek_tmp_key( spider_db_fetch_row(spider->share, field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -5849,8 +5849,8 @@ int spider_db_seek_tmp_minimum_columns( bitmap_is_set(table->write_set, (*field)->field_index))); */ #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(*field))); @@ -5859,7 +5859,7 @@ int spider_db_seek_tmp_minimum_columns( DBUG_RETURN(error_num); row->next(); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } else if (bitmap_is_set(table->read_set, (*field)->field_index)) @@ -9668,7 +9668,7 @@ int spider_db_open_item_string( { THD *thd = NULL; TABLE *table; - my_bitmap_map *saved_map; + MY_BITMAP *saved_map; Time_zone *saved_time_zone; String str_value; char tmp_buf[MAX_FIELD_WIDTH]; @@ -9697,7 +9697,7 @@ int spider_db_open_item_string( */ table = field->table; thd = table->in_use; - saved_map = dbug_tmp_use_all_columns(table, table->write_set); + saved_map = dbug_tmp_use_all_columns(table, &table->write_set); item->save_in_field(field, FALSE); saved_time_zone = thd->variables.time_zone; thd->variables.time_zone = UTC; @@ -9742,7 +9742,7 @@ end: if (thd) { thd->variables.time_zone = saved_time_zone; - dbug_tmp_restore_column_map(table->write_set, saved_map); + dbug_tmp_restore_column_map(&table->write_set, saved_map); } } @@ -9784,7 +9784,7 @@ int spider_db_open_item_int( { THD *thd = NULL; TABLE *table; - my_bitmap_map *saved_map; + MY_BITMAP *saved_map; Time_zone *saved_time_zone; String str_value; bool print_quoted_string; 
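Note how every Spider swap sits inside `#ifndef DBUG_OFF` ... `#endif`, so the all-columns override is compiled only into debug builds, while mroonga reaches the same helpers through its `mrn::DebugColumnAccess` wrapper (constructor change earlier in this patch). A hypothetical RAII guard in that spirit, building on the stand-ins from the previous sketch; it is not part of the patch, merely an illustration of why a wrapper is attractive: the restore cannot be forgotten on early returns such as `DBUG_RETURN(error_num)`.

// Hypothetical guard, not server code: restores the column set on scope exit.
class AllColumnsGuard
{
  MY_BITMAP **bitmap_;
  MY_BITMAP *saved_;
public:
  AllColumnsGuard(TABLE *table, MY_BITMAP **bitmap)
    : bitmap_(bitmap), saved_(tmp_use_all_columns(table, bitmap)) {}
  ~AllColumnsGuard() { tmp_restore_column_map(bitmap_, saved_); }
  AllColumnsGuard(const AllColumnsGuard &)= delete;
  AllColumnsGuard &operator=(const AllColumnsGuard &)= delete;
};

// Usage inside a handler method:
//   AllColumnsGuard guard(table, &table->write_set);
//   field->store(...);   // original write_set restored automatically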
@@ -9812,7 +9812,7 @@ int spider_db_open_item_int( */ table = field->table; thd = table->in_use; - saved_map = dbug_tmp_use_all_columns(table, table->write_set); + saved_map = dbug_tmp_use_all_columns(table, &table->write_set); item->save_in_field(field, FALSE); saved_time_zone = thd->variables.time_zone; thd->variables.time_zone = UTC; @@ -9858,7 +9858,7 @@ end: if (thd) { thd->variables.time_zone = saved_time_zone; - dbug_tmp_restore_column_map(table->write_set, saved_map); + dbug_tmp_restore_column_map(&table->write_set, saved_map); } } @@ -10178,8 +10178,8 @@ int spider_db_udf_fetch_table( DBUG_RETURN(HA_ERR_END_OF_FILE); #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif for ( roop_count = 0, @@ -10192,7 +10192,7 @@ int spider_db_udf_fetch_table( spider_db_udf_fetch_row(trx, *field, row))) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif DBUG_RETURN(error_num); } @@ -10202,7 +10202,7 @@ int spider_db_udf_fetch_table( for (; roop_count < set_off; roop_count++, field++) (*field)->set_default(); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif table->status = 0; DBUG_RETURN(0); diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index 420f14f9919..86ce0c530b1 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -9690,8 +9690,7 @@ int spider_mbase_handler::append_update_set( mysql_share->append_column_name(str, (*fields)->field_index); str->q_append(SPIDER_SQL_EQUAL_STR, SPIDER_SQL_EQUAL_LEN); #ifndef DBUG_OFF - my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, - table->read_set); + MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set); #endif if ( spider_db_mbase_utility-> @@ -9700,12 +9699,12 @@ int spider_mbase_handler::append_update_set( str->reserve(SPIDER_SQL_COMMA_LEN) ) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif DBUG_RETURN(HA_ERR_OUT_OF_MEM); } #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif } str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN); @@ -12426,8 +12425,8 @@ int spider_mbase_handler::append_insert_values( bitmap_is_set(table->read_set, (*field)->field_index) ) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->read_set); #endif add_value = TRUE; DBUG_PRINT("info",("spider is_null()=%s", @@ -12449,7 +12448,7 @@ int spider_mbase_handler::append_insert_values( if (str->reserve(SPIDER_SQL_NULL_LEN + SPIDER_SQL_COMMA_LEN)) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif str->length(0); DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -12463,7 +12462,7 @@ int spider_mbase_handler::append_insert_values( str->reserve(SPIDER_SQL_COMMA_LEN) ) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif str->length(0); DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -12471,7 +12470,7 @@ int spider_mbase_handler::append_insert_values( } 
str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif } } diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index 3734233552d..d36f7624adf 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -2313,7 +2313,7 @@ int ha_tokudb::pack_row_in_buff( int r = ENOSYS; memset((void *) row, 0, sizeof(*row)); - my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set); // Copy null bytes memcpy(row_buff, record, table_share->null_bytes); @@ -2362,7 +2362,7 @@ int ha_tokudb::pack_row_in_buff( row->size = (size_t) (var_field_data_ptr - row_buff); r = 0; - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return r; } @@ -2758,7 +2758,7 @@ DBT* ha_tokudb::create_dbt_key_from_key( { uint32_t size = 0; uchar* tmp_buff = buff; - my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set); key->data = buff; @@ -2797,7 +2797,7 @@ DBT* ha_tokudb::create_dbt_key_from_key( key->size = size; DBUG_DUMP("key", (uchar *) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return key; } @@ -2890,7 +2890,7 @@ DBT* ha_tokudb::pack_key( KEY* key_info = &table->key_info[keynr]; KEY_PART_INFO* key_part = key_info->key_part; KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts; - my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set); memset((void *) key, 0, sizeof(*key)); key->data = buff; @@ -2927,7 +2927,7 @@ DBT* ha_tokudb::pack_key( key->size = (buff - (uchar *) key->data); DBUG_DUMP("key", (uchar *) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(key); } @@ -2955,7 +2955,7 @@ DBT* ha_tokudb::pack_ext_key( KEY* key_info = &table->key_info[keynr]; KEY_PART_INFO* key_part = key_info->key_part; KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts; - my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set); memset((void *) key, 0, sizeof(*key)); key->data = buff; @@ -3034,7 +3034,7 @@ DBT* ha_tokudb::pack_ext_key( key->size = (buff - (uchar *) key->data); DBUG_DUMP("key", (uchar *) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(key); } #endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS diff --git a/storage/tokudb/mysql-test/tokudb/r/type_decimal.result b/storage/tokudb/mysql-test/tokudb/r/type_decimal.result index 3b82bbcef4f..c01edef283e 100644 --- a/storage/tokudb/mysql-test/tokudb/r/type_decimal.result +++ b/storage/tokudb/mysql-test/tokudb/r/type_decimal.result @@ -177,9 +177,8 @@ Note 1265 Data truncated for column 'a' at row 2 insert into t1 values ("1e+18446744073709551615"),("1e+18446744073709551616"),("1e-9223372036854775807"),("1e-9223372036854775809"); Warnings: Warning 1264 Out of range value for column 'a' at row 1 -Warning 1366 Incorrect decimal value: 
'1e+18446744073709551616' for column `test`.`t1`.`a` at row 2 +Warning 1264 Out of range value for column 'a' at row 2 Note 1265 Data truncated for column 'a' at row 3 -Warning 1366 Incorrect decimal value: '1e-9223372036854775809' for column `test`.`t1`.`a` at row 4 insert into t1 values ("123.4e"),("123.4e+2"),("123.4e-2"),("123e1"),("123e+0"); Warnings: Warning 1265 Data truncated for column 'a' at row 1 @@ -210,7 +209,7 @@ a 99999999.99 0.00 99999999.99 -0.00 +99999999.99 0.00 0.00 123.40
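Beyond the column-map migration, the other recurring fix in this merge is undefined behaviour: left shifts into or past the sign bit (groonga's alloc.c, db.c, pat.c and str.c), shifts by the full width of the operand (myisampack.c's make_traverse_code_tree() and flush_bits()), and memcpy() with a null source (proc_select.c, str.c, mrn_multiple_column_key_codec.cpp). A stand-alone illustration of the three patterns, with invented values; only the idioms, not the names, come from the patch:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
  // 1 << 31 shifts into the sign bit of a 32-bit int: undefined.
  // Promoting the operand first, as in GRN_CTX_SEGMENT_WORD, is defined.
  uint32_t word_bit= 1U << 31;

  // Shifting a 64-bit value by 64 is undefined, hence the new
  // size == 8 * sizeof(ulonglong) special case in the Huffman writer.
  uint64_t code= 0x0123456789ABCDEFULL;
  unsigned size= 64;
  uint64_t prefix= (size == 8 * sizeof(uint64_t)) ? 0 : (code >> size);

  // memcpy() requires valid pointers even when the length is zero,
  // which is what the added "if (length) memcpy(...)" guards ensure.
  char buf[8];
  const char *src= nullptr;
  size_t len= 0;
  if (len)
    memcpy(buf, src, len);

  printf("%u %llu\n", word_bit, (unsigned long long) prefix);
  return 0;
}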