author     Sachin Setiya <sachin.setiya@mariadb.com>  2017-03-17 02:05:20 +0530
committer  Sachin Setiya <sachin.setiya@mariadb.com>  2017-03-17 02:05:20 +0530
commit     f66395f7c06f357e7ace74317e6563a1e5dbc3ae (patch)
tree       d1669b069aa722c9c73abe52e5924b7bbba41b39 /storage
parent     c401773c8dd97bd9f6bf6cff9fcafdf24fbd0ee1 (diff)
parent     c4f3e64c23fe7f7fd18c0a79f87f9771df15fe9f (diff)
download   mariadb-git-f66395f7c06f357e7ace74317e6563a1e5dbc3ae.tar.gz
Merge tag 'mariadb-10.0.30' into bb-sachin-10.0-galera-merge
Signed-off-by: Sachin Setiya <sachin.setiya@mariadb.com>
Diffstat (limited to 'storage')
-rw-r--r--  storage/connect/CMakeLists.txt | 10
-rw-r--r--  storage/connect/array.cpp | 108
-rw-r--r--  storage/connect/array.h | 6
-rw-r--r--  storage/connect/colblk.cpp | 16
-rw-r--r--  storage/connect/connect.cc | 77
-rw-r--r--  storage/connect/domdoc.cpp | 11
-rw-r--r--  storage/connect/domdoc.h | 1
-rw-r--r--  storage/connect/filamap.cpp | 12
-rw-r--r--  storage/connect/filamdbf.cpp | 27
-rw-r--r--  storage/connect/filamgz.cpp | 12
-rw-r--r--  storage/connect/filamgz.h | 12
-rw-r--r--  storage/connect/filamzip.cpp | 569
-rw-r--r--  storage/connect/filamzip.h | 164
-rw-r--r--  storage/connect/ha_connect.cc | 345
-rw-r--r--  storage/connect/ioapi.c | 21
-rw-r--r--  storage/connect/ioapi.h | 5
-rw-r--r--  storage/connect/jdbconn.cpp | 24
-rw-r--r--  storage/connect/json.cpp | 80
-rw-r--r--  storage/connect/jsonudf.cpp | 109
-rw-r--r--  storage/connect/mycat.cc | 15
-rw-r--r--  storage/connect/myconn.cpp | 20
-rw-r--r--  storage/connect/mysql-test/connect/r/xml_zip.result | 98
-rw-r--r--  storage/connect/mysql-test/connect/r/zip.result | 240
-rw-r--r--  storage/connect/mysql-test/connect/std_data/bios.json | 273
-rw-r--r--  storage/connect/mysql-test/connect/std_data/xsample2.xml | 47
-rw-r--r--  storage/connect/mysql-test/connect/t/have_zip.inc | 19
-rw-r--r--  storage/connect/mysql-test/connect/t/xml_zip.test | 41
-rw-r--r--  storage/connect/mysql-test/connect/t/zip.test | 136
-rw-r--r--  storage/connect/odbconn.cpp | 14
-rw-r--r--  storage/connect/plgdbsem.h | 26
-rw-r--r--  storage/connect/plgdbutl.cpp | 14
-rw-r--r--  storage/connect/plgxml.cpp | 4
-rw-r--r--  storage/connect/plgxml.h | 2
-rw-r--r--  storage/connect/plugutil.c | 3
-rw-r--r--  storage/connect/reldef.cpp | 6
-rw-r--r--  storage/connect/reldef.h | 18
-rw-r--r--  storage/connect/tabdos.cpp | 40
-rw-r--r--  storage/connect/tabdos.h | 8
-rw-r--r--  storage/connect/tabext.cpp | 640
-rw-r--r--  storage/connect/tabext.h | 200
-rw-r--r--  storage/connect/tabfix.cpp | 4
-rw-r--r--  storage/connect/tabfix.h | 2
-rw-r--r--  storage/connect/tabfmt.cpp | 75
-rw-r--r--  storage/connect/tabfmt.h | 5
-rw-r--r--  storage/connect/tabjdbc.cpp | 555
-rw-r--r--  storage/connect/tabjdbc.h | 129
-rw-r--r--  storage/connect/tabjson.cpp | 31
-rw-r--r--  storage/connect/tabjson.h | 4
-rw-r--r--  storage/connect/table.cpp | 249
-rw-r--r--  storage/connect/tabmac.cpp | 2
-rw-r--r--  storage/connect/tabmac.h | 2
-rw-r--r--  storage/connect/tabmul.cpp | 26
-rw-r--r--  storage/connect/tabmul.h | 18
-rw-r--r--  storage/connect/tabmysql.cpp | 183
-rw-r--r--  storage/connect/tabmysql.h | 75
-rw-r--r--  storage/connect/taboccur.cpp | 9
-rw-r--r--  storage/connect/tabodbc.cpp | 321
-rw-r--r--  storage/connect/tabodbc.h | 133
-rw-r--r--  storage/connect/tabpivot.cpp | 7
-rw-r--r--  storage/connect/tabpivot.h | 2
-rw-r--r--  storage/connect/tabsys.cpp | 8
-rw-r--r--  storage/connect/tabsys.h | 4
-rw-r--r--  storage/connect/tabtbl.cpp | 23
-rw-r--r--  storage/connect/tabutil.cpp | 23
-rw-r--r--  storage/connect/tabutil.h | 8
-rw-r--r--  storage/connect/tabvct.cpp | 6
-rw-r--r--  storage/connect/tabvct.h | 2
-rw-r--r--  storage/connect/tabvir.cpp | 2
-rw-r--r--  storage/connect/tabwmi.cpp | 21
-rw-r--r--  storage/connect/tabxcl.cpp | 8
-rw-r--r--  storage/connect/tabxcl.h | 2
-rw-r--r--  storage/connect/tabxml.cpp | 13
-rw-r--r--  storage/connect/tabxml.h | 2
-rw-r--r--  storage/connect/tabzip.cpp | 16
-rw-r--r--  storage/connect/tabzip.h | 2
-rw-r--r--  storage/connect/value.h | 2
-rwxr-xr-x  storage/connect/xindex.cpp | 8
-rw-r--r--  storage/connect/xindex.h | 4
-rw-r--r--  storage/connect/xobject.h | 3
-rw-r--r--  storage/connect/xtable.h | 211
-rw-r--r--  storage/connect/zip.c | 14
-rw-r--r--  storage/innobase/btr/btr0cur.cc | 8
-rw-r--r--  storage/innobase/buf/buf0dump.cc | 16
-rw-r--r--  storage/innobase/buf/buf0flu.cc | 4
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 1
-rw-r--r--  storage/innobase/dict/dict0stats_bg.cc | 6
-rw-r--r--  storage/innobase/dyn/dyn0dyn.cc | 1
-rw-r--r--  storage/innobase/fil/fil0fil.cc | 106
-rw-r--r--  storage/innobase/fsp/fsp0fsp.cc | 7
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 16
-rw-r--r--  storage/innobase/fts/fts0opt.cc | 3
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 23
-rw-r--r--  storage/innobase/include/buf0buf.h | 5
-rw-r--r--  storage/innobase/include/buf0dblwr.h | 7
-rw-r--r--  storage/innobase/include/dict0dict.ic | 7
-rw-r--r--  storage/innobase/include/dict0mem.h | 2
-rw-r--r--  storage/innobase/include/dict0stats_bg.h | 4
-rw-r--r--  storage/innobase/include/dyn0dyn.ic | 8
-rw-r--r--  storage/innobase/include/fil0fil.h | 5
-rw-r--r--  storage/innobase/include/fsp0fsp.h | 2
-rw-r--r--  storage/innobase/include/fsp0types.h | 1
-rw-r--r--  storage/innobase/include/fts0fts.h | 5
-rw-r--r--  storage/innobase/include/fts0types.h | 4
-rw-r--r--  storage/innobase/include/lock0lock.h | 16
-rw-r--r--  storage/innobase/include/log0log.h | 21
-rw-r--r--  storage/innobase/include/mach0data.ic | 13
-rw-r--r--  storage/innobase/include/os0file.h | 8
-rw-r--r--  storage/innobase/include/page0page.ic | 1
-rw-r--r--  storage/innobase/include/srv0srv.h | 37
-rw-r--r--  storage/innobase/include/trx0purge.h | 6
-rw-r--r--  storage/innobase/include/ut0wqueue.h | 5
-rw-r--r--  storage/innobase/log/log0log.cc | 21
-rw-r--r--  storage/innobase/log/log0recv.cc | 28
-rw-r--r--  storage/innobase/mtr/mtr0mtr.cc | 1
-rw-r--r--  storage/innobase/os/os0file.cc | 130
-rw-r--r--  storage/innobase/page/page0page.cc | 2
-rw-r--r--  storage/innobase/page/page0zip.cc | 2
-rw-r--r--  storage/innobase/row/row0ftsort.cc | 75
-rw-r--r--  storage/innobase/row/row0merge.cc | 6
-rw-r--r--  storage/innobase/row/row0upd.cc | 4
-rw-r--r--  storage/innobase/srv/srv0conc.cc | 9
-rw-r--r--  storage/innobase/srv/srv0srv.cc | 218
-rw-r--r--  storage/innobase/srv/srv0start.cc | 355
-rw-r--r--  storage/innobase/sync/sync0sync.cc | 3
-rw-r--r--  storage/innobase/trx/trx0purge.cc | 42
-rw-r--r--  storage/innobase/trx/trx0sys.cc | 4
-rw-r--r--  storage/innobase/trx/trx0trx.cc | 6
-rw-r--r--  storage/innobase/trx/trx0undo.cc | 29
-rw-r--r--  storage/maria/ha_maria.cc | 4
-rw-r--r--  storage/maria/ma_check.c | 6
-rw-r--r--  storage/maria/ma_create.c | 57
-rw-r--r--  storage/maria/ma_delete_table.c | 17
-rw-r--r--  storage/maria/ma_open.c | 71
-rw-r--r--  storage/maria/ma_static.c | 6
-rw-r--r--  storage/maria/maria_chk.c | 2
-rw-r--r--  storage/maria/maria_def.h | 3
-rw-r--r--  storage/mroonga/vendor/groonga/lib/CMakeLists.txt | 12
-rw-r--r--  storage/myisam/ha_myisam.cc | 83
-rw-r--r--  storage/myisam/ha_myisam.h | 2
-rw-r--r--  storage/myisam/mi_check.c | 18
-rw-r--r--  storage/myisam/mi_create.c | 53
-rw-r--r--  storage/myisam/mi_delete_table.c | 36
-rw-r--r--  storage/myisam/mi_open.c | 71
-rw-r--r--  storage/myisam/mi_static.c | 8
-rw-r--r--  storage/myisam/myisamchk.c | 2
-rw-r--r--  storage/myisam/myisamdef.h | 3
-rw-r--r--  storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result | 2
-rw-r--r--  storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.test | 2
-rw-r--r--  storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.result | 2
-rw-r--r--  storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.test | 2
-rw-r--r--  storage/tokudb/CMakeLists.txt | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/ft-ops.cc | 23
-rw-r--r--  storage/tokudb/PerconaFT/ft/ft-ops.h | 5
-rw-r--r--  storage/tokudb/PerconaFT/ft/logger/recover.cc | 3
-rw-r--r--  storage/tokudb/PerconaFT/ft/node.cc | 18
-rw-r--r--  storage/tokudb/PerconaFT/ft/node.h | 54
-rw-r--r--  storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc | 3
-rw-r--r--  storage/tokudb/PerconaFT/ft/txn/roll.cc | 3
-rw-r--r--  storage/tokudb/PerconaFT/util/dmt.h | 5
-rw-r--r--  storage/tokudb/PerconaFT/util/omt.h | 2
-rw-r--r--  storage/tokudb/ha_tokudb.cc | 81
-rw-r--r--  storage/tokudb/ha_tokudb.h | 2
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result | 46
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result | 469
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/locks-select-update-3.result | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result | 43
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test | 64
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test | 5
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/locks-select-update-3.test | 1
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test | 5
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/suite.opt | 2
-rw-r--r--  storage/xtradb/btr/btr0btr.cc | 4
-rw-r--r--  storage/xtradb/btr/btr0cur.cc | 118
-rw-r--r--  storage/xtradb/buf/buf0buddy.cc | 1
-rw-r--r--  storage/xtradb/buf/buf0buf.cc | 9
-rw-r--r--  storage/xtradb/buf/buf0dump.cc | 16
-rw-r--r--  storage/xtradb/buf/buf0flu.cc | 4
-rw-r--r--  storage/xtradb/buf/buf0lru.cc | 142
-rw-r--r--  storage/xtradb/dict/dict0dict.cc | 1
-rw-r--r--  storage/xtradb/dict/dict0stats.cc | 13
-rw-r--r--  storage/xtradb/dict/dict0stats_bg.cc | 6
-rw-r--r--  storage/xtradb/dyn/dyn0dyn.cc | 1
-rw-r--r--  storage/xtradb/fil/fil0fil.cc | 104
-rw-r--r--  storage/xtradb/fsp/fsp0fsp.cc | 13
-rw-r--r--  storage/xtradb/fts/fts0fts.cc | 16
-rw-r--r--  storage/xtradb/fts/fts0opt.cc | 10
-rw-r--r--  storage/xtradb/handler/ha_innodb.cc | 124
-rw-r--r--  storage/xtradb/handler/ha_innodb.h | 2
-rw-r--r--  storage/xtradb/handler/handler0alter.cc | 79
-rw-r--r--  storage/xtradb/handler/i_s.cc | 24
-rw-r--r--  storage/xtradb/include/btr0cur.h | 18
-rw-r--r--  storage/xtradb/include/btr0sea.h | 7
-rw-r--r--  storage/xtradb/include/btr0sea.ic | 4
-rw-r--r--  storage/xtradb/include/buf0buddy.ic | 2
-rw-r--r--  storage/xtradb/include/buf0buf.h | 11
-rw-r--r--  storage/xtradb/include/buf0dblwr.h | 7
-rw-r--r--  storage/xtradb/include/dict0dict.h | 19
-rw-r--r--  storage/xtradb/include/dict0dict.ic | 7
-rw-r--r--  storage/xtradb/include/dict0mem.h | 2
-rw-r--r--  storage/xtradb/include/dict0stats_bg.h | 4
-rw-r--r--  storage/xtradb/include/dyn0dyn.h | 18
-rw-r--r--  storage/xtradb/include/dyn0dyn.ic | 10
-rw-r--r--  storage/xtradb/include/fil0fil.h | 5
-rw-r--r--  storage/xtradb/include/fsp0fsp.h | 2
-rw-r--r--  storage/xtradb/include/fsp0types.h | 1
-rw-r--r--  storage/xtradb/include/fts0fts.h | 5
-rw-r--r--  storage/xtradb/include/fts0types.h | 4
-rw-r--r--  storage/xtradb/include/lock0lock.h | 16
-rw-r--r--  storage/xtradb/include/log0log.h | 21
-rw-r--r--  storage/xtradb/include/log0online.h | 20
-rw-r--r--  storage/xtradb/include/mach0data.h | 16
-rw-r--r--  storage/xtradb/include/mach0data.ic | 13
-rw-r--r--  storage/xtradb/include/mtr0mtr.h | 5
-rw-r--r--  storage/xtradb/include/os0file.h | 13
-rw-r--r--  storage/xtradb/include/os0thread.h | 8
-rw-r--r--  storage/xtradb/include/page0page.h | 20
-rw-r--r--  storage/xtradb/include/page0page.ic | 1
-rw-r--r--  storage/xtradb/include/page0zip.h | 7
-rw-r--r--  storage/xtradb/include/rem0rec.h | 3
-rw-r--r--  storage/xtradb/include/row0upd.h | 5
-rw-r--r--  storage/xtradb/include/srv0srv.h | 46
-rw-r--r--  storage/xtradb/include/trx0purge.h | 6
-rw-r--r--  storage/xtradb/include/trx0trx.h | 2
-rw-r--r--  storage/xtradb/include/univ.i | 4
-rw-r--r--  storage/xtradb/include/ut0wqueue.h | 5
-rw-r--r--  storage/xtradb/log/log0log.cc | 21
-rw-r--r--  storage/xtradb/log/log0online.cc | 175
-rw-r--r--  storage/xtradb/log/log0recv.cc | 29
-rw-r--r--  storage/xtradb/mach/mach0data.cc | 44
-rw-r--r--  storage/xtradb/mtr/mtr0mtr.cc | 1
-rw-r--r--  storage/xtradb/os/os0file.cc | 189
-rw-r--r--  storage/xtradb/os/os0thread.cc | 28
-rw-r--r--  storage/xtradb/page/page0page.cc | 2
-rw-r--r--  storage/xtradb/page/page0zip.cc | 2
-rw-r--r--  storage/xtradb/rem/rem0rec.cc | 2
-rw-r--r--  storage/xtradb/row/row0ftsort.cc | 75
-rw-r--r--  storage/xtradb/row/row0merge.cc | 10
-rw-r--r--  storage/xtradb/row/row0mysql.cc | 2
-rw-r--r--  storage/xtradb/row/row0purge.cc | 2
-rw-r--r--  storage/xtradb/row/row0upd.cc | 4
-rw-r--r--  storage/xtradb/srv/srv0conc.cc | 9
-rw-r--r--  storage/xtradb/srv/srv0srv.cc | 314
-rw-r--r--  storage/xtradb/srv/srv0start.cc | 383
-rw-r--r--  storage/xtradb/sync/sync0sync.cc | 3
-rw-r--r--  storage/xtradb/trx/trx0purge.cc | 20
-rw-r--r--  storage/xtradb/trx/trx0sys.cc | 70
-rw-r--r--  storage/xtradb/trx/trx0trx.cc | 6
-rw-r--r--  storage/xtradb/trx/trx0undo.cc | 29
248 files changed, 6840 insertions, 3873 deletions
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index ce6de424421..a602084b5bd 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -22,16 +22,16 @@ fmdlex.c osutil.c plugutil.c rcmsg.c rcmsg.h
array.cpp blkfil.cpp colblk.cpp csort.cpp
filamap.cpp filamdbf.cpp filamfix.cpp filamgz.cpp filamtxt.cpp
filter.cpp json.cpp jsonudf.cpp maputil.cpp myconn.cpp myutil.cpp plgdbutl.cpp
-reldef.cpp tabcol.cpp tabdos.cpp tabfix.cpp tabfmt.cpp tabjson.cpp table.cpp
-tabmul.cpp tabmysql.cpp taboccur.cpp tabpivot.cpp tabsys.cpp tabtbl.cpp tabutil.cpp
-tabvir.cpp tabxcl.cpp valblk.cpp value.cpp xindex.cpp xobject.cpp
+reldef.cpp tabcol.cpp tabdos.cpp tabext.cpp tabfix.cpp tabfmt.cpp tabjson.cpp
+table.cpp tabmul.cpp tabmysql.cpp taboccur.cpp tabpivot.cpp tabsys.cpp tabtbl.cpp
+tabutil.cpp tabvir.cpp tabxcl.cpp valblk.cpp value.cpp xindex.cpp xobject.cpp
array.h blkfil.h block.h catalog.h checklvl.h colblk.h connect.h csort.h
engmsg.h filamap.h filamdbf.h filamfix.h filamgz.h filamtxt.h
filter.h global.h ha_connect.h inihandl.h json.h jsonudf.h maputil.h msgid.h
mycat.h myconn.h myutil.h os.h osutil.h plgcnx.h plgdbsem.h preparse.h reldef.h
-resource.h tabcol.h tabdos.h tabfix.h tabfmt.h tabjson.h tabmul.h tabmysql.h
-taboccur.h tabpivot.h tabsys.h tabtbl.h tabutil.h tabvir.h tabxcl.h
+resource.h tabcol.h tabdos.h tabext.h tabfix.h tabfmt.h tabjson.h tabmul.h
+tabmysql.h taboccur.h tabpivot.h tabsys.h tabtbl.h tabutil.h tabvir.h tabxcl.h
user_connect.h valblk.h value.h xindex.h xobject.h xtable.h)
#
diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp
index 193514eeb99..1998ab890e9 100644
--- a/storage/connect/array.cpp
+++ b/storage/connect/array.cpp
@@ -1,7 +1,7 @@
/************* Array C++ Functions Source Code File (.CPP) *************/
/* Name: ARRAY.CPP Version 2.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* This file contains the XOBJECT derived class ARRAY functions. */
/* ARRAY is used for elaborate type of processing, such as sorting */
@@ -141,7 +141,7 @@ PARRAY MakeValueArray(PGLOBAL g, PPARM pp)
/* ARRAY public constructor. */
/***********************************************************************/
ARRAY::ARRAY(PGLOBAL g, int type, int size, int length, int prec)
- : CSORT(FALSE)
+ : CSORT(false)
{
Nval = 0;
Ndif = 0;
@@ -188,14 +188,14 @@ ARRAY::ARRAY(PGLOBAL g, int type, int size, int length, int prec)
else if (type != TYPE_PCHAR)
Value = AllocateValue(g, type, Len, prec);
- Constant = TRUE;
+ Constant = true;
} // end of ARRAY constructor
#if 0
/***********************************************************************/
/* ARRAY public constructor from a QUERY. */
/***********************************************************************/
-ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(FALSE)
+ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(false)
{
Type = qryp->GetColType(0);
Nval = qryp->GetNblin();
@@ -206,7 +206,7 @@ ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(FALSE)
Xsize = -1;
Len = qryp->GetColLength(0);
X = Inf = Sup = 0;
- Correlated = FALSE;
+ Correlated = false;
switch (Type) {
case TYPE_STRING:
@@ -229,13 +229,13 @@ ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(FALSE)
// The error message was built by ???
Type = TYPE_ERROR;
- Constant = TRUE;
+ Constant = true;
} // end of ARRAY constructor
/***********************************************************************/
/* ARRAY constructor from a TYPE_LIST subarray. */
/***********************************************************************/
-ARRAY::ARRAY(PGLOBAL g, PARRAY par, int k) : CSORT(FALSE)
+ARRAY::ARRAY(PGLOBAL g, PARRAY par, int k) : CSORT(false)
{
int prec;
LSTBLK *lp;
@@ -260,7 +260,7 @@ ARRAY::ARRAY(PGLOBAL g, PARRAY par, int k) : CSORT(FALSE)
Len = (Type == TYPE_STRING) ? Vblp->GetVlen() : 0;
prec = (Type == TYPE_FLOAT) ? 2 : 0;
Value = AllocateValue(g, Type, Len, prec, NULL);
- Constant = TRUE;
+ Constant = true;
} // end of ARRAY constructor
/***********************************************************************/
@@ -283,7 +283,7 @@ bool ARRAY::AddValue(PGLOBAL g, PSZ strp)
{
if (Type != TYPE_STRING) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "CHAR");
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -292,7 +292,7 @@ bool ARRAY::AddValue(PGLOBAL g, PSZ strp)
//Value->SetValue_psz(strp);
//Vblp->SetValue(valp, Nval++);
Vblp->SetValue(strp, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -302,14 +302,14 @@ bool ARRAY::AddValue(PGLOBAL g, void *p)
{
if (Type != TYPE_PCHAR) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "PCHAR");
- return TRUE;
+ return true;
} // endif Type
if (trace)
htrc(" adding pointer(%d): %p\n", Nval, p);
Vblp->SetValue((PSZ)p, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -319,7 +319,7 @@ bool ARRAY::AddValue(PGLOBAL g, short n)
{
if (Type != TYPE_SHORT) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "SHORT");
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -328,7 +328,7 @@ bool ARRAY::AddValue(PGLOBAL g, short n)
//Value->SetValue(n);
//Vblp->SetValue(valp, Nval++);
Vblp->SetValue(n, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -338,7 +338,7 @@ bool ARRAY::AddValue(PGLOBAL g, int n)
{
if (Type != TYPE_INT) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "INTEGER");
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -347,7 +347,7 @@ bool ARRAY::AddValue(PGLOBAL g, int n)
//Value->SetValue(n);
//Vblp->SetValue(valp, Nval++);
Vblp->SetValue(n, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -357,7 +357,7 @@ bool ARRAY::AddValue(PGLOBAL g, double d)
{
if (Type != TYPE_DOUBLE) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "DOUBLE");
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -365,7 +365,7 @@ bool ARRAY::AddValue(PGLOBAL g, double d)
Value->SetValue(d);
Vblp->SetValue(Value, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -376,7 +376,7 @@ bool ARRAY::AddValue(PGLOBAL g, PXOB xp)
if (Type != xp->GetResultType()) {
sprintf(g->Message, MSG(ADD_BAD_TYPE),
GetTypeName(xp->GetResultType()), GetTypeName(Type));
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -384,7 +384,7 @@ bool ARRAY::AddValue(PGLOBAL g, PXOB xp)
//AddValue(xp->GetValue());
Vblp->SetValue(xp->GetValue(), Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -395,14 +395,14 @@ bool ARRAY::AddValue(PGLOBAL g, PVAL vp)
if (Type != vp->GetType()) {
sprintf(g->Message, MSG(ADD_BAD_TYPE),
GetTypeName(vp->GetType()), GetTypeName(Type));
- return TRUE;
+ return true;
} // endif Type
if (trace)
htrc(" adding (%d) from vp=%p\n", Nval, vp);
Vblp->SetValue(vp, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -423,12 +423,12 @@ bool ARRAY::GetSubValue(PGLOBAL g, PVAL valp, int *kp)
if (Type != TYPE_LIST) {
sprintf(g->Message, MSG(NO_SUB_VAL), Type);
- return TRUE;
+ return true;
} // endif Type
vblp = ((LSTBLK*)Vblp)->Mbvk[kp[0]]->Vblk;
valp->SetValue_pvblk(vblp, kp[1]);
- return FALSE;
+ return false;
} // end of GetSubValue
#endif // 0
@@ -476,11 +476,11 @@ bool ARRAY::Find(PVAL valp)
else if (n > 0)
Inf = X;
else
- return TRUE;
+ return true;
} // endwhile
- return FALSE;
+ return false;
} // end of Find
/***********************************************************************/
@@ -504,9 +504,9 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm)
int top = Nval - 1;
if (top < 0) // Array is empty
- // Return TRUE for ALL because it means that there are no item that
+ // Return true for ALL because it means that there are no item that
// does not verify the condition, which is true indeed.
- // Return FALSE for ANY because TRUE means that there is at least
+ // Return false for ANY because true means that there is at least
// one item that verifies the condition, which is false.
return opm == 2;
@@ -528,9 +528,9 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm)
else if (opc == OP_NE && opm == 2)
return !Find(vp);
else if (opc == OP_EQ && opm == 2)
- return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : FALSE;
+ return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : false;
else if (opc == OP_NE && opm == 1)
- return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : TRUE;
+ return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : true;
if (Type != TYPE_LIST) {
if (opc == OP_GT || opc == OP_GE)
@@ -544,15 +544,15 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm)
if (opm == 2) {
for (i = 0; i < Nval; i++)
if (Vcompare(vp, i) & bt)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
} else { // opm == 1
for (i = 0; i < Nval; i++)
if (!(Vcompare(vp, i) & bt))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
} // endif opm
} // end of FilTest
@@ -566,7 +566,7 @@ bool ARRAY::CanBeShort(void)
int* To_Val = (int*)Valblk->GetMemp();
if (Type != TYPE_INT || !Ndif)
- return FALSE;
+ return false;
// Because the array is sorted, this is true if all the array
// int values are in the range of SHORT values
@@ -582,7 +582,7 @@ bool ARRAY::CanBeShort(void)
int ARRAY::Convert(PGLOBAL g, int k, PVAL vp)
{
int i, prec = 0;
- bool b = FALSE;
+ bool b = false;
PMBV ovblk = Valblk;
PVBLK ovblp = Vblp;
@@ -619,7 +619,7 @@ int ARRAY::Convert(PGLOBAL g, int k, PVAL vp)
if (((DTVAL*)Value)->SetFormat(g, vp))
return TYPE_ERROR;
else
- b = TRUE; // Sort the new array on date internal values
+ b = true; // Sort the new array on date internal values
/*********************************************************************/
/* Do the actual conversion. */
@@ -706,7 +706,7 @@ void ARRAY::SetPrecision(PGLOBAL g, int p)
/***********************************************************************/
/* Sort and eliminate distinct values from an array. */
/* Note: this is done by making a sorted index on distinct values. */
-/* Returns FALSE if Ok or TRUE in case of error. */
+/* Returns false if Ok or true in case of error. */
/***********************************************************************/
bool ARRAY::Sort(PGLOBAL g)
{
@@ -789,14 +789,14 @@ bool ARRAY::Sort(PGLOBAL g)
Bot = -1; // For non optimized search
Top = Ndif; // Find searches the whole array.
- return FALSE;
+ return false;
error:
Nval = Ndif = 0;
Valblk->Free();
PlgDBfree(Index);
PlgDBfree(Offset);
- return TRUE;
+ return true;
} // end of Sort
/***********************************************************************/
@@ -839,9 +839,9 @@ void *ARRAY::GetSortIndex(PGLOBAL g)
/***********************************************************************/
/* Block filter testing for IN operator on Column/Array operands. */
-/* Here we call Find that returns TRUE if the value is in the array */
+/* Here we call Find that returns true if the value is in the array */
/* with X equal to the index of the found value in the array, or */
-/* FALSE if the value is not in the array with Inf and Sup being the */
+/* false if the value is not in the array with Inf and Sup being the */
/* indexes of the array values that are immediately below and over */
/* the not found value. This enables to restrict the array to the */
/* values that are between the min and max block values and to return */
@@ -854,9 +854,9 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
bool bin, bax, pin, pax, veq, all = (opm == 2);
if (Ndif == 0) // Array is empty
- // Return TRUE for ALL because it means that there are no item that
+ // Return true for ALL because it means that there are no item that
// does not verify the condition, which is true indeed.
- // Return FALSE for ANY because TRUE means that there is at least
+ // Return false for ANY because true means that there is at least
// one item that verifies the condition, which is false.
return (all) ? 2 : -2;
else if (opc == OP_EQ && all && Ndif > 1)
@@ -864,7 +864,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
else if (opc == OP_NE && !all && Ndif > 1)
return 2;
// else if (Ndif == 1)
-// all = FALSE;
+// all = false;
// veq is true when all values in the block are equal
switch (Type) {
@@ -874,7 +874,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
case TYPE_SHORT: veq = *(short*)minp == *(short*)maxp; break;
case TYPE_INT: veq = *(int*)minp == *(int*)maxp; break;
case TYPE_DOUBLE: veq = *(double*)minp == *(double*)maxp; break;
- default: veq = FALSE; // Error ?
+ default: veq = false; // Error ?
} // endswitch type
if (!s)
@@ -898,7 +898,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
case OP_GT: return -1; break;
} // endswitch opc
- pax = (opc == OP_GE) ? (X < Ndif - 1) : TRUE;
+ pax = (opc == OP_GE) ? (X < Ndif - 1) : true;
} else if (Inf == Bot) {
// Max value is smaller than min list value
return (opc == OP_LT || opc == OP_LE || opc == OP_NE) ? 1 : -1;
@@ -924,7 +924,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
case OP_LT: return (s) ? -2 : -1; break;
} // endswitch opc
- pin = (opc == OP_LE) ? (X > 0) : TRUE;
+ pin = (opc == OP_LE) ? (X > 0) : true;
} else if (Sup == Ndif) {
// Min value is greater than max list value
if (opc == OP_GT || opc == OP_GE || opc == OP_NE)
@@ -956,7 +956,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
// the only possible overlaps between the array and the block are:
// Array: +-------+ +-------+ +-------+ +-----+
// Block: +-----+ +---+ +------+ +--------+
- // TRUE: pax pin pax pin
+ // true: pax pin pax pin
if (all) switch (opc) {
case OP_GT:
case OP_GE: return (pax) ? -1 : 0; break;
@@ -1052,7 +1052,7 @@ void ARRAY::Print(PGLOBAL, char *ps, uint z)
/***********************************************************************/
/* MULAR public constructor. */
/***********************************************************************/
-MULAR::MULAR(PGLOBAL g, int n) : CSORT(FALSE)
+MULAR::MULAR(PGLOBAL g, int n) : CSORT(false)
{
Narray = n;
Pars = (PARRAY*)PlugSubAlloc(g, NULL, n * sizeof(PARRAY));
@@ -1075,7 +1075,7 @@ int MULAR::Qcompare(int *i1, int *i2)
/***********************************************************************/
/* Sort and eliminate distinct values from multiple arrays. */
/* Note: this is done by making a sorted index on distinct values. */
-/* Returns FALSE if Ok or TRUE in case of error. */
+/* Returns false if Ok or true in case of error. */
/***********************************************************************/
bool MULAR::Sort(PGLOBAL g)
{
@@ -1087,7 +1087,7 @@ bool MULAR::Sort(PGLOBAL g)
for (n = 1; n < Narray; n++)
if (Pars[n]->Nval != nval) {
strcpy(g->Message, MSG(BAD_ARRAY_VAL));
- return TRUE;
+ return true;
} // endif nval
// Prepare non conservative sort with offet values
@@ -1161,10 +1161,10 @@ bool MULAR::Sort(PGLOBAL g)
Pars[n]->Top = ndif; // Find searches the whole array.
} // endfor n
- return FALSE;
+ return false;
error:
PlgDBfree(Index);
PlgDBfree(Offset);
- return TRUE;
+ return true;
} // end of Sort
diff --git a/storage/connect/array.h b/storage/connect/array.h
index 6fb38ae6b47..dfc3638de8a 100644
--- a/storage/connect/array.h
+++ b/storage/connect/array.h
@@ -1,7 +1,7 @@
/**************** Array H Declares Source Code File (.H) ***************/
/* Name: ARRAY.H Version 3.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* This file contains the ARRAY and VALBASE derived classes declares. */
/***********************************************************************/
@@ -53,8 +53,8 @@ class DllExport ARRAY : public XOBJECT, public CSORT { // Array descblock
using XOBJECT::GetIntValue;
virtual void Reset(void) {Bot = -1;}
virtual int Qcompare(int *, int *);
- virtual bool Compare(PXOB) {assert(FALSE); return FALSE;}
- virtual bool SetFormat(PGLOBAL, FORMAT&) {assert(FALSE); return FALSE;}
+ virtual bool Compare(PXOB) {assert(false); return false;}
+ virtual bool SetFormat(PGLOBAL, FORMAT&) {assert(false); return false;}
//virtual int CheckSpcCol(PTDB, int) {return 0;}
virtual void Print(PGLOBAL g, FILE *f, uint n);
virtual void Print(PGLOBAL g, char *ps, uint z);
diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp
index 80b405be041..58841387249 100644
--- a/storage/connect/colblk.cpp
+++ b/storage/connect/colblk.cpp
@@ -1,7 +1,7 @@
/************* Colblk C++ Functions Source Code File (.CPP) ************/
-/* Name: COLBLK.CPP Version 2.1 */
+/* Name: COLBLK.CPP Version 2.2 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* This file contains the COLBLK class functions. */
/***********************************************************************/
@@ -300,7 +300,7 @@ FIDBLK::FIDBLK(PCOLUMN cp, OPVAL op) : SPCBLK(cp), Op(op)
#if defined(__WIN__)
Format.Prec = 1; // Case insensitive
#endif // __WIN__
- Constant = (!((PTDBASE)To_Tdb)->GetDef()->GetMultiple() &&
+ Constant = (!To_Tdb->GetDef()->GetMultiple() &&
To_Tdb->GetAmType() != TYPE_AM_PLG &&
To_Tdb->GetAmType() != TYPE_AM_PLM);
Fn = NULL;
@@ -312,11 +312,11 @@ FIDBLK::FIDBLK(PCOLUMN cp, OPVAL op) : SPCBLK(cp), Op(op)
/***********************************************************************/
void FIDBLK::ReadColumn(PGLOBAL g)
{
- if (Fn != ((PTDBASE)To_Tdb)->GetFile(g)) {
+ if (Fn != To_Tdb->GetFile(g)) {
char filename[_MAX_PATH];
- Fn = ((PTDBASE)To_Tdb)->GetFile(g);
- PlugSetPath(filename, Fn, ((PTDBASE)To_Tdb)->GetPath());
+ Fn = To_Tdb->GetFile(g);
+ PlugSetPath(filename, Fn, To_Tdb->GetPath());
if (Op != OP_XX) {
char buff[_MAX_PATH];
@@ -378,10 +378,8 @@ void PRTBLK::ReadColumn(PGLOBAL g)
{
if (Pname == NULL) {
char *p;
- PTDBASE tdbp = (PTDBASE)To_Tdb;
-
- Pname = tdbp->GetDef()->GetStringCatInfo(g, "partname", "?");
+ Pname = To_Tdb->GetDef()->GetStringCatInfo(g, "partname", "?");
p = strrchr(Pname, '#');
Value->SetValue_psz((p) ? p + 1 : Pname);
} // endif Pname
diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc
index 460d47bcf62..a17c5dafa43 100644
--- a/storage/connect/connect.cc
+++ b/storage/connect/connect.cc
@@ -157,23 +157,22 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname)
/* Returns valid: true if this is a table info. */
/***********************************************************************/
bool CntInfo(PGLOBAL g, PTDB tp, PXF info)
- {
- bool b;
- PTDBDOS tdbp= (PTDBDOS)tp;
+{
+ if (tp) {
+ bool b = (tp->GetFtype() == RECFM_NAF);
+ PTDBDOS tdbp = b ? NULL : (PTDBDOS)tp;
- if (tdbp) {
- b= tdbp->GetFtype() != RECFM_NAF;
- info->data_file_length= (b) ? (ulonglong)tdbp->GetFileLength(g) : 0;
+ info->data_file_length = (b) ? 0 : (ulonglong)tdbp->GetFileLength(g);
- if (!b || info->data_file_length)
- info->records= (unsigned)tdbp->Cardinality(g);
-// info->records= (unsigned)tdbp->GetMaxSize(g);
+ if (b || info->data_file_length)
+ info->records= (unsigned)tp->Cardinality(g);
+// info->records= (unsigned)tp->GetMaxSize(g);
else
info->records= 0;
// info->mean_rec_length= tdbp->GetLrecl();
info->mean_rec_length= 0;
- info->data_file_name= (b) ? tdbp->GetFile(g) : NULL;
+ info->data_file_name= (b) ? NULL : tdbp->GetFile(g);
return true;
} else {
info->data_file_length= 0;
@@ -183,7 +182,7 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info)
return false;
} // endif tdbp
- } // end of CntInfo
+} // end of CntInfo
/***********************************************************************/
/* GetTDB: Get the table description block of a CONNECT table. */
@@ -332,9 +331,9 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
} // endfor colp
// Attach the updated columns list to the main table
- ((PTDBASE)tdbp)->SetSetCols(utp->GetColumns());
+ tdbp->SetSetCols(utp->GetColumns());
} else if (tdbp && mode == MODE_INSERT)
- ((PTDBASE)tdbp)->SetSetCols(tdbp->GetColumns());
+ tdbp->SetSetCols(tdbp->GetColumns());
// Now do open the physical table
if (trace)
@@ -343,7 +342,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
//tdbp->SetMode(mode);
- if (del/* && ((PTDBASE)tdbp)->GetFtype() != RECFM_NAF*/) {
+ if (del/* && (tdbp->GetFtype() != RECFM_NAF*/) {
// To avoid erasing the table when doing a partial delete
// make a fake Next
// PDOSDEF ddp= new(g) DOSDEF;
@@ -436,7 +435,7 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp)
if (!tdbp)
return RC_FX;
- else if (((PTDBASE)tdbp)->GetKindex()) {
+ else if (tdbp->GetKindex()) {
// Reading sequencially an indexed table. This happens after the
// handler function records_in_range was called and MySQL decides
// to quit using the index (!!!) Drop the index.
@@ -483,7 +482,7 @@ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp)
{
RCODE rc;
PCOL colp;
- PTDBASE tp= (PTDBASE)tdbp;
+//PTDBASE tp= (PTDBASE)tdbp;
if (!tdbp)
return RC_FX;
@@ -501,13 +500,13 @@ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp)
} // endif rc
// Store column values in table write buffer(s)
- for (colp= tp->GetSetCols(); colp; colp= colp->GetNext())
+ for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext())
if (!colp->GetColUse(U_VIRTUAL))
colp->WriteColumn(g);
- if (tp->IsIndexed())
+ if (tdbp->IsIndexed())
// Index values must be sorted before updating
- rc= (RCODE)((PTDBDOS)tp)->GetTxfp()->StoreValues(g, true);
+ rc= (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, true);
else
// Return result code from write operation
rc= (RCODE)tdbp->WriteDB(g);
@@ -535,7 +534,7 @@ RCODE CntUpdateRow(PGLOBAL g, PTDB tdbp)
RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all)
{
RCODE rc;
- PTDBASE tp= (PTDBASE)tdbp;
+//PTDBASE tp= (PTDBASE)tdbp;
if (!tdbp || tdbp->GetMode() != MODE_DELETE)
return RC_FX;
@@ -543,16 +542,16 @@ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all)
return RC_NF;
if (all) {
- if (((PTDBASE)tdbp)->GetDef()->Indexable())
+ if (tdbp->GetDef()->Indexable())
((PTDBDOS)tdbp)->Cardinal= 0;
// Note: if all, this call will be done when closing the table
rc= (RCODE)tdbp->DeleteDB(g, RC_FX);
-//} else if (tp->GetKindex() && !tp->GetKindex()->IsSorted() &&
-// tp->Txfp->GetAmType() != TYPE_AM_DBF) {
- } else if(tp->IsIndexed()) {
+//} else if (tdbp->GetKindex() && !((PTDBASE)tdbp)->GetKindex()->IsSorted() &&
+// ((PTDBASE)tdbp)->Txfp->GetAmType() != TYPE_AM_DBF) {
+ } else if(tdbp->IsIndexed()) {
// Index values must be sorted before updating
- rc= (RCODE)((PTDBDOS)tp)->GetTxfp()->StoreValues(g, false);
+ rc= (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, false);
} else // Return result code from delete operation
rc= (RCODE)tdbp->DeleteDB(g, RC_OK);
@@ -565,7 +564,7 @@ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all)
int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
{
int rc= RC_OK;
- TDBASE *tbxp= (PTDBASE)tdbp;
+//TDBASE *tbxp= (PTDBASE)tdbp;
if (!tdbp)
return rc; // Nothing to do
@@ -581,13 +580,13 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
tdbp, tdbp->GetMode(), nox, abort);
if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) {
- if (tbxp->IsIndexed())
+ if (tdbp->IsIndexed())
rc= ((PTDBDOS)tdbp)->GetTxfp()->DeleteSortedRows(g);
if (!rc)
rc= tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine
- } else if (tbxp->GetMode() == MODE_UPDATE && tbxp->IsIndexed())
+ } else if (tdbp->GetMode() == MODE_UPDATE && tdbp->IsIndexed())
rc= ((PTDBDOX)tdbp)->Txfp->UpdateSortedRows(g);
switch(rc) {
@@ -595,7 +594,7 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
abort= true;
break;
case RC_INFO:
- PushWarning(g, tbxp);
+ PushWarning(g, tdbp);
break;
} // endswitch rc
@@ -631,11 +630,13 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
if (trace > 1)
printf("About to reset opt\n");
- // Make all the eventual indexes
- tbxp= (TDBDOX*)tdbp;
- tbxp->ResetKindex(g, NULL);
- tbxp->SetKey_Col(NULL);
- rc= tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1);
+ if (!tdbp->IsRemote()) {
+ // Make all the eventual indexes
+ PTDBDOX tbxp = (PTDBDOX)tdbp;
+ tbxp->ResetKindex(g, NULL);
+ tbxp->SetKey_Col(NULL);
+ rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1);
+ } // endif remote
err:
if (trace > 1)
@@ -657,10 +658,10 @@ int CntIndexInit(PGLOBAL g, PTDB ptdb, int id, bool sorted)
if (!ptdb)
return -1;
- else if (!((PTDBASE)ptdb)->GetDef()->Indexable()) {
+ else if (!ptdb->GetDef()->Indexable()) {
sprintf(g->Message, MSG(TABLE_NO_INDEX), ptdb->GetName());
return 0;
- } else if (((PTDBASE)ptdb)->GetDef()->Indexable() == 3) {
+ } else if (ptdb->GetDef()->Indexable() == 3) {
return 1;
} else
tdbp= (PTDBDOX)ptdb;
@@ -745,7 +746,7 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op,
if (!ptdb)
return RC_FX;
else
- x= ((PTDBASE)ptdb)->GetDef()->Indexable();
+ x= ptdb->GetDef()->Indexable();
if (!x) {
sprintf(g->Message, MSG(TABLE_NO_INDEX), ptdb->GetName());
@@ -875,7 +876,7 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len,
if (!ptdb)
return -1;
- x= ((PTDBASE)ptdb)->GetDef()->Indexable();
+ x= ptdb->GetDef()->Indexable();
if (!x) {
sprintf(g->Message, MSG(TABLE_NO_INDEX), ptdb->GetName());
diff --git a/storage/connect/domdoc.cpp b/storage/connect/domdoc.cpp
index eb9660b439d..1622ec16c68 100644
--- a/storage/connect/domdoc.cpp
+++ b/storage/connect/domdoc.cpp
@@ -116,7 +116,9 @@ bool DOMDOC::ParseFile(PGLOBAL g, char *fn)
// Parse an in memory document
char *xdoc = GetMemDoc(g, fn);
- b = (xdoc) ? (bool)Docp->loadXML((_bstr_t)xdoc) : false;
+ // This is not equivalent to load for UTF8 characters
+ // It is why get node content is not the same
+ b = (xdoc) ? (bool)Docp->loadXML((_bstr_t)xdoc) : false;
} else
// Load the document
b = (bool)Docp->load((_bstr_t)fn);
@@ -266,6 +268,7 @@ DOMNODE::DOMNODE(PXDOC dp, MSXML2::IXMLDOMNodePtr np) : XMLNODE(dp)
Nodep = np;
Ws = NULL;
Len = 0;
+ Zip = (bool)dp->zip;
} // end of DOMNODE constructor
/******************************************************************/
@@ -316,8 +319,10 @@ RCODE DOMNODE::GetContent(PGLOBAL g, char *buf, int len)
RCODE rc = RC_OK;
// Nodep can be null for a missing HTML table column
- if (Nodep) {
- if (!WideCharToMultiByte(CP_UTF8, 0, Nodep->text, -1,
+ if (Nodep) {
+ if (Zip) {
+ strcpy(buf, Nodep->text);
+ } else if (!WideCharToMultiByte(CP_UTF8, 0, Nodep->text, -1,
buf, len, NULL, NULL)) {
DWORD lsr = GetLastError();
diff --git a/storage/connect/domdoc.h b/storage/connect/domdoc.h
index cfec98a9422..7f269002d59 100644
--- a/storage/connect/domdoc.h
+++ b/storage/connect/domdoc.h
@@ -93,6 +93,7 @@ class DOMNODE : public XMLNODE {
char Name[64];
WCHAR *Ws;
int Len;
+ bool Zip;
}; // end of class DOMNODE
/******************************************************************/
diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp
index 94c562a9981..8fffaca3d06 100644
--- a/storage/connect/filamap.cpp
+++ b/storage/connect/filamap.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -45,6 +45,7 @@
#include "maputil.h"
#include "filamap.h"
#include "tabdos.h"
+#include "tabfmt.h"
/* --------------------------- Class MAPFAM -------------------------- */
@@ -322,17 +323,20 @@ int MAPFAM::ReadBuffer(PGLOBAL g)
int rc, len;
// Are we at the end of the memory
- if (Mempos >= Top)
+ if (Mempos >= Top) {
if ((rc = GetNext(g)) != RC_OK)
return rc;
+ else if (Tdbp->GetAmType() == TYPE_AM_CSV && ((PTDBCSV)Tdbp)->Header)
+ if ((rc = SkipRecord(g, true)) != RC_OK)
+ return rc;
+
+ } // endif Mempos
if (!Placed) {
/*******************************************************************/
/* Record file position in case of UPDATE or DELETE. */
/*******************************************************************/
- int rc;
-
next:
Fpos = Mempos;
CurBlk = (int)Rows++;
diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp
index a4557facbd8..9feb61d7d61 100644
--- a/storage/connect/filamdbf.cpp
+++ b/storage/connect/filamdbf.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -281,15 +281,25 @@ PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info)
/************************************************************************/
switch (thisfield.Type) {
case 'C': // Characters
- case 'L': // Logical 'T' or 'F'
- type = TYPE_STRING;
+ case 'L': // Logical 'T' or 'F' or space
+ type = TYPE_STRING;
+ break;
+ case 'M': // Memo a .DBT block number
+ case 'B': // Binary a .DBT block number
+ case 'G': // Ole a .DBT block number
+ type = TYPE_STRING;
break;
+ //case 'I': // Long
+ //case '+': // Autoincrement
+ // type = TYPE_INT;
+ // break;
case 'N':
type = (thisfield.Decimals) ? TYPE_DOUBLE
: (len > 10) ? TYPE_BIGINT : TYPE_INT;
break;
- case 'F':
- type = TYPE_DOUBLE;
+ case 'F': // Float
+ //case 'O': // Double
+ type = TYPE_DOUBLE;
break;
case 'D':
type = TYPE_DATE; // Is this correct ???
@@ -441,6 +451,7 @@ int DBFFAM::Cardinality(PGLOBAL g)
if (Accept) {
Lrecl = rln;
+ Blksize = Nrec * rln;
PushWarning(g, Tdbp);
} else
return -1;
@@ -582,6 +593,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
if (Accept) {
Lrecl = reclen;
+ Blksize = Nrec * Lrecl;
PushWarning(g, Tdbp);
} else
return true;
@@ -598,7 +610,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
header->Filedate[1] = datm->tm_mon + 1;
header->Filedate[2] = datm->tm_mday;
header->SetHeadlen((ushort)hlen);
- header->SetReclen((ushort)reclen);
+ header->SetReclen(reclen);
descp = (DESCRIPTOR*)header;
// Currently only standard Xbase types are supported
@@ -664,6 +676,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
if (Accept) {
Lrecl = header.Reclen();
+ Blksize = Nrec * Lrecl;
PushWarning(g, Tdbp);
} else
return true;
@@ -956,6 +969,7 @@ int DBMFAM::Cardinality(PGLOBAL g)
if (Accept) {
Lrecl = rln;
+ Blksize = Nrec * Lrecl;
PushWarning(g, Tdbp);
} else
return -1;
@@ -1008,6 +1022,7 @@ bool DBMFAM::AllocateBuffer(PGLOBAL g)
if (Accept) {
Lrecl = hp->Reclen();
+ Blksize = Nrec * Lrecl;
PushWarning(g, Tdbp);
} else
return true;
diff --git a/storage/connect/filamgz.cpp b/storage/connect/filamgz.cpp
index 07242ea633c..dc6f277ee27 100644
--- a/storage/connect/filamgz.cpp
+++ b/storage/connect/filamgz.cpp
@@ -724,20 +724,20 @@ void ZBKFAM::Rewind(void)
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-ZIXFAM::ZIXFAM(PDOSDEF tdp) : ZBKFAM(tdp)
+GZXFAM::GZXFAM(PDOSDEF tdp) : ZBKFAM(tdp)
{
//Block = tdp->GetBlock();
//Last = tdp->GetLast();
Nrec = (tdp->GetElemt()) ? tdp->GetElemt() : DOS_BUFF_LEN;
Blksize = Nrec * Lrecl;
- } // end of ZIXFAM standard constructor
+ } // end of GZXFAM standard constructor
/***********************************************************************/
/* ZIX Cardinality: returns table cardinality in number of rows. */
/* This function can be called with a null argument to test the */
/* availability of Cardinality implementation (1 yes, 0 no). */
/***********************************************************************/
-int ZIXFAM::Cardinality(PGLOBAL g)
+int GZXFAM::Cardinality(PGLOBAL g)
{
if (Last)
return (g) ? (int)((Block - 1) * Nrec + Last) : 1;
@@ -750,7 +750,7 @@ int ZIXFAM::Cardinality(PGLOBAL g)
/* Allocate the line buffer. For mode Delete a bigger buffer has to */
/* be allocated because is it also used to move lines into the file. */
/***********************************************************************/
-bool ZIXFAM::AllocateBuffer(PGLOBAL g)
+bool GZXFAM::AllocateBuffer(PGLOBAL g)
{
Buflen = Blksize;
To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen);
@@ -788,7 +788,7 @@ bool ZIXFAM::AllocateBuffer(PGLOBAL g)
/***********************************************************************/
/* ReadBuffer: Read one line from a compressed text file. */
/***********************************************************************/
-int ZIXFAM::ReadBuffer(PGLOBAL g)
+int GZXFAM::ReadBuffer(PGLOBAL g)
{
int n, rc = RC_OK;
@@ -850,7 +850,7 @@ int ZIXFAM::ReadBuffer(PGLOBAL g)
/* WriteDB: Data Base write routine for ZDOS access method. */
/* Update is not possible without using a temporary file (NIY). */
/***********************************************************************/
-int ZIXFAM::WriteBuffer(PGLOBAL g)
+int GZXFAM::WriteBuffer(PGLOBAL g)
{
/*********************************************************************/
/* In Insert mode, blocs are added sequentialy to the file end. */
diff --git a/storage/connect/filamgz.h b/storage/connect/filamgz.h
index d667fdddcc2..7a00c0d4bc7 100644
--- a/storage/connect/filamgz.h
+++ b/storage/connect/filamgz.h
@@ -12,7 +12,7 @@
typedef class GZFAM *PGZFAM;
typedef class ZBKFAM *PZBKFAM;
-typedef class ZIXFAM *PZIXFAM;
+typedef class GZXFAM *PZIXFAM;
typedef class ZLBFAM *PZLBFAM;
/***********************************************************************/
@@ -101,16 +101,16 @@ class DllExport ZBKFAM : public GZFAM {
/* length files compressed using the gzip library functions. */
/* The file is always accessed by block. */
/***********************************************************************/
-class DllExport ZIXFAM : public ZBKFAM {
+class DllExport GZXFAM : public ZBKFAM {
public:
// Constructor
- ZIXFAM(PDOSDEF tdp);
- ZIXFAM(PZIXFAM txfp) : ZBKFAM(txfp) {}
+ GZXFAM(PDOSDEF tdp);
+ GZXFAM(PZIXFAM txfp) : ZBKFAM(txfp) {}
// Implementation
virtual int GetNextPos(void) {return 0;}
virtual PTXF Duplicate(PGLOBAL g)
- {return (PTXF)new(g) ZIXFAM(this);}
+ {return (PTXF)new(g) GZXFAM(this);}
// Methods
virtual int Cardinality(PGLOBAL g);
@@ -120,7 +120,7 @@ class DllExport ZIXFAM : public ZBKFAM {
protected:
// No additional Members
- }; // end of class ZIXFAM
+ }; // end of class GZXFAM
/***********************************************************************/
/* This is the DOS/UNIX Access Method class declaration for PlugDB */
diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp
index 65013e170e4..3d157da5e87 100644
--- a/storage/connect/filamzip.cpp
+++ b/storage/connect/filamzip.cpp
@@ -1,11 +1,11 @@
/*********** File AM Zip C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: FILAMZIP */
/* ------------- */
-/* Version 1.0 */
+/* Version 1.1 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -19,13 +19,16 @@
#include "my_global.h"
#if !defined(__WIN__)
#if defined(UNIX)
+#include <fnmatch.h>
#include <errno.h>
+#include <dirent.h>
#include <unistd.h>
#else // !UNIX
#include <io.h>
#endif // !UNIX
#include <fcntl.h>
#endif // !__WIN__
+#include <time.h>
/***********************************************************************/
/* Include application header files: */
@@ -40,12 +43,346 @@
//#include "tabzip.h"
#include "filamzip.h"
+#define WRITEBUFFERSIZE (16384)
+
+bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul);
+
+/***********************************************************************/
+/* Compress a file in zip when creating a table. */
+/***********************************************************************/
+static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, char *fn, char *entry, char *buf)
+{
+ int rc = RC_OK, size_read, size_buf = WRITEBUFFERSIZE;
+ FILE *fin;
+
+ if (zutp->addEntry(g, entry))
+ return true;
+ else if (!(fin = fopen(fn, "rb"))) {
+ sprintf(g->Message, "error in opening %s for reading", fn);
+ return true;
+ } // endif fin
+
+ do {
+ size_read = (int)fread(buf, 1, size_buf, fin);
+
+ if (size_read < size_buf && feof(fin) == 0) {
+ sprintf(g->Message, "error in reading %s", fn);
+ rc = RC_FX;
+ } // endif size_read
+
+ if (size_read > 0) {
+ rc = zutp->writeEntry(g, buf, size_read);
+
+ if (rc == RC_FX)
+ sprintf(g->Message, "error in writing %s in the zipfile", fn);
+
+ } // endif size_read
+
+ } while (rc == RC_OK && size_read > 0);
+
+ fclose(fin);
+ zutp->closeEntry();
+ return rc != RC_OK;
+} // end of ZipFile
+
+/***********************************************************************/
+/* Find and Compress several files in zip when creating a table. */
+/***********************************************************************/
+static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, char *pat, char *buf)
+{
+ char filename[_MAX_PATH];
+ int rc;
+
+ /*********************************************************************/
+ /* pat is a multiple file name with wildcard characters */
+ /*********************************************************************/
+ strcpy(filename, pat);
+
+#if defined(__WIN__)
+ char drive[_MAX_DRIVE], direc[_MAX_DIR];
+ WIN32_FIND_DATA FileData;
+ HANDLE hSearch;
+
+ _splitpath(filename, drive, direc, NULL, NULL);
+
+ // Start searching files in the target directory.
+ hSearch = FindFirstFile(filename, &FileData);
+
+ if (hSearch == INVALID_HANDLE_VALUE) {
+ rc = GetLastError();
+
+ if (rc != ERROR_FILE_NOT_FOUND) {
+ FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, GetLastError(), 0, (LPTSTR)&filename, sizeof(filename), NULL);
+ sprintf(g->Message, MSG(BAD_FILE_HANDLE), filename);
+ return true;
+ } else {
+ strcpy(g->Message, "Cannot find any file to load");
+ return true;
+ } // endif rc
+
+ } // endif hSearch
+
+ while (true) {
+ if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ strcat(strcat(strcpy(filename, drive), direc), FileData.cFileName);
+
+ if (ZipFile(g, zutp, filename, FileData.cFileName, buf)) {
+ FindClose(hSearch);
+ return true;
+ } // endif ZipFile
+
+ } // endif dwFileAttributes
+
+ if (!FindNextFile(hSearch, &FileData)) {
+ rc = GetLastError();
+
+ if (rc != ERROR_NO_MORE_FILES) {
+ sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc);
+ FindClose(hSearch);
+ return true;
+ } // endif rc
+
+ break;
+ } // endif FindNextFile
+
+ } // endwhile n
+
+ // Close the search handle.
+ if (!FindClose(hSearch)) {
+ strcpy(g->Message, MSG(SRCH_CLOSE_ERR));
+ return true;
+ } // endif FindClose
+
+#else // !__WIN__
+ struct stat fileinfo;
+ char fn[FN_REFLEN], direc[FN_REFLEN], pattern[FN_HEADLEN], ftype[FN_EXTLEN];
+ DIR *dir;
+ struct dirent *entry;
+
+ _splitpath(filename, NULL, direc, pattern, ftype);
+ strcat(pattern, ftype);
+
+ // Start searching files in the target directory.
+ if (!(dir = opendir(direc))) {
+ sprintf(g->Message, MSG(BAD_DIRECTORY), direc, strerror(errno));
+ return true;
+ } // endif dir
+
+ while ((entry = readdir(dir))) {
+ strcat(strcpy(fn, direc), entry->d_name);
+
+ if (lstat(fn, &fileinfo) < 0) {
+ sprintf(g->Message, "%s: %s", fn, strerror(errno));
+ return true;
+ } else if (!S_ISREG(fileinfo.st_mode))
+ continue; // Not a regular file (should test for links)
+
+ /*******************************************************************/
+ /* Test whether the file name matches the table name filter. */
+ /*******************************************************************/
+ if (fnmatch(pattern, entry->d_name, 0))
+ continue; // Not a match
+
+ strcat(strcpy(filename, direc), entry->d_name);
+
+ if (ZipFile(g, zutp, filename, entry->d_name, buf)) {
+ closedir(dir);
+ return true;
+ } // endif ZipFile
+
+ } // endwhile readdir
+
+ // Close the dir handle.
+ closedir(dir);
+#endif // !__WIN__
+
+ return false;
+} // end of ZipFiles
+
+/***********************************************************************/
+/* Load and Compress a file in zip when creating a table. */
+/***********************************************************************/
+bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul)
+{
+ char *buf;
+ bool err;
+ ZIPUTIL *zutp = new(g) ZIPUTIL(NULL);
+
+ if (zutp->open(g, zfn, append))
+ return true;
+
+ buf = (char*)PlugSubAlloc(g, NULL, WRITEBUFFERSIZE);
+
+ if (mul)
+ err = ZipFiles(g, zutp, fn, buf);
+ else
+ err = ZipFile(g, zutp, fn, entry, buf);
+
+ zutp->close();
+ return err;
+} // end of ZipLoadFile
+
/* -------------------------- class ZIPUTIL -------------------------- */
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-ZIPUTIL::ZIPUTIL(PSZ tgt, bool mul)
+ZIPUTIL::ZIPUTIL(PSZ tgt)
+{
+ zipfile = NULL;
+ target = tgt;
+ fp = NULL;
+ entryopen = false;
+} // end of ZIPUTIL standard constructor
+
+#if 0
+ZIPUTIL::ZIPUTIL(ZIPUTIL *zutp)
+{
+ zipfile = zutp->zipfile;
+ target = zutp->target;
+ fp = zutp->fp;
+ entryopen = zutp->entryopen;
+} // end of UNZIPUTL copy constructor
+#endif // 0
+
+/***********************************************************************/
+/* Fill the zip time structure */
+/* param: tmZip time structure to be filled */
+/***********************************************************************/
+void ZIPUTIL::getTime(tm_zip& tmZip)
+{
+ time_t rawtime;
+ time(&rawtime);
+ struct tm *timeinfo = localtime(&rawtime);
+ tmZip.tm_sec = timeinfo->tm_sec;
+ tmZip.tm_min = timeinfo->tm_min;
+ tmZip.tm_hour = timeinfo->tm_hour;
+ tmZip.tm_mday = timeinfo->tm_mday;
+ tmZip.tm_mon = timeinfo->tm_mon;
+ tmZip.tm_year = timeinfo->tm_year;
+} // end of getTime
+
+/***********************************************************************/
+/* open a zip file for deflate. */
+/* param: filename path and the filename of the zip file to open. */
+/* append: set true to append the zip file */
+/* return: true if open, false otherwise. */
+/***********************************************************************/
+bool ZIPUTIL::open(PGLOBAL g, char *filename, bool append)
+{
+ if (!zipfile && !(zipfile = zipOpen64(filename,
+ append ? APPEND_STATUS_ADDINZIP
+ : APPEND_STATUS_CREATE)))
+ sprintf(g->Message, "Zipfile open error on %s", filename);
+
+ return (zipfile == NULL);
+} // end of open
+
+/***********************************************************************/
+/* Close the zip file. */
+/***********************************************************************/
+void ZIPUTIL::close()
+{
+ if (zipfile) {
+ closeEntry();
+ zipClose(zipfile, 0);
+ zipfile = NULL;
+ } // endif zipfile
+
+} // end of close
+
+/***********************************************************************/
+/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
+/***********************************************************************/
+bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn, bool append)
+{
+ /*********************************************************************/
+ /* The file will be compressed. */
+ /*********************************************************************/
+ if (mode == MODE_INSERT) {
+ bool b = open(g, fn, append);
+
+ if (!b) {
+ if (addEntry(g, target))
+ return true;
+
+ /*****************************************************************/
+ /* Link a Fblock. This make possible to automatically close it */
+ /* in case of error g->jump. */
+ /*****************************************************************/
+ PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr;
+
+ fp = (PFBLOCK)PlugSubAlloc(g, NULL, sizeof(FBLOCK));
+ fp->Type = TYPE_FB_ZIP;
+ fp->Fname = PlugDup(g, fn);
+ fp->Next = dbuserp->Openlist;
+ dbuserp->Openlist = fp;
+ fp->Count = 1;
+ fp->Length = 0;
+ fp->Memory = NULL;
+ fp->Mode = mode;
+ fp->File = this;
+ fp->Handle = 0;
+ } else
+ return true;
+
+ } else {
+ strcpy(g->Message, "Only INSERT mode supported for ZIPPING files");
+ return true;
+ } // endif mode
+
+ return false;
+} // end of OpenTableFile
+
+/***********************************************************************/
+/* Add target in zip file. */
+/***********************************************************************/
+bool ZIPUTIL::addEntry(PGLOBAL g, char *entry)
+{
+ //?? we dont need the stinking time
+ zip_fileinfo zi = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ getTime(zi.tmz_date);
+ target = entry;
+
+ int err = zipOpenNewFileInZip(zipfile, target, &zi,
+ NULL, 0, NULL, 0, NULL, Z_DEFLATED, Z_DEFAULT_COMPRESSION);
+
+ return !(entryopen = (err == ZIP_OK));
+} // end of addEntry
+
+/***********************************************************************/
+/* writeEntry: Deflate the buffer to the zip file. */
+/***********************************************************************/
+int ZIPUTIL::writeEntry(PGLOBAL g, char *buf, int len)
+{
+ if (zipWriteInFileInZip(zipfile, buf, len) < 0) {
+ sprintf(g->Message, "Error writing %s in the zipfile", target);
+ return RC_FX;
+ } // endif zipWriteInFileInZip
+
+ return RC_OK;
+} // end of writeEntry
+
+/***********************************************************************/
+/*  Close the currently open entry of the zip file.                   */
+/***********************************************************************/
+void ZIPUTIL::closeEntry()
+{
+ if (entryopen) {
+ zipCloseFileInZip(zipfile);
+ entryopen = false;
+ } // endif entryopen
+
+} // end of closeEntry
+
+/* ------------------------- class UNZIPUTL -------------------------- */
+
+/***********************************************************************/
+/* Constructors. */
+/***********************************************************************/
+UNZIPUTL::UNZIPUTL(PSZ tgt, bool mul)
{
zipfile = NULL;
target = tgt;
@@ -62,10 +399,10 @@ ZIPUTIL::ZIPUTIL(PSZ tgt, bool mul)
#else
for (int i = 0; i < 256; ++i) mapCaseTable[i] = i;
#endif
-} // end of ZIPUTIL standard constructor
+} // end of UNZIPUTL standard constructor
#if 0
-ZIPUTIL::ZIPUTIL(PZIPUTIL zutp)
+UNZIPUTL::UNZIPUTL(PZIPUTIL zutp)
{
zipfile = zutp->zipfile;
target = zutp->target;
@@ -74,14 +411,14 @@ ZIPUTIL::ZIPUTIL(PZIPUTIL zutp)
entryopen = zutp->entryopen;
multiple = zutp->multiple;
for (int i = 0; i < 256; ++i) mapCaseTable[i] = zutp->mapCaseTable[i];
-} // end of ZIPUTIL copy constructor
+} // end of UNZIPUTL copy constructor
#endif // 0
/***********************************************************************/
/* This code is the copyright property of Alessandro Felice Cantatore. */
/* http://xoomer.virgilio.it/acantato/dev/wildcard/wildmatch.html */
/***********************************************************************/
-bool ZIPUTIL::WildMatch(PSZ pat, PSZ str) {
+bool UNZIPUTL::WildMatch(PSZ pat, PSZ str) {
PSZ s, p;
bool star = FALSE;
@@ -97,7 +434,7 @@ loopStart:
if (!*++pat) return TRUE;
goto loopStart;
default:
- if (mapCaseTable[*s] != mapCaseTable[*p])
+ if (mapCaseTable[(uchar)*s] != mapCaseTable[(uchar)*p])
goto starCheck;
break;
} /* endswitch */
@@ -116,7 +453,7 @@ starCheck:
/* param: filename path and the filename of the zip file to open. */
/* return: true if open, false otherwise. */
/***********************************************************************/
-bool ZIPUTIL::open(PGLOBAL g, char *filename)
+bool UNZIPUTL::open(PGLOBAL g, char *filename)
{
if (!zipfile && !(zipfile = unzOpen64(filename)))
sprintf(g->Message, "Zipfile open error on %s", filename);
@@ -127,7 +464,7 @@ bool ZIPUTIL::open(PGLOBAL g, char *filename)
/***********************************************************************/
/* Close the zip file. */
/***********************************************************************/
-void ZIPUTIL::close()
+void UNZIPUTL::close()
{
if (zipfile) {
closeEntry();
@@ -140,7 +477,7 @@ void ZIPUTIL::close()
/***********************************************************************/
/* Find next entry matching target pattern. */
/***********************************************************************/
-int ZIPUTIL::findEntry(PGLOBAL g, bool next)
+int UNZIPUTL::findEntry(PGLOBAL g, bool next)
{
int rc;
@@ -151,7 +488,7 @@ int ZIPUTIL::findEntry(PGLOBAL g, bool next)
if (rc == UNZ_END_OF_LIST_OF_FILE)
return RC_EF;
else if (rc != UNZ_OK) {
- sprintf(g->Message, "unzGoToNextFile rc = ", rc);
+ sprintf(g->Message, "unzGoToNextFile rc = %d", rc);
return RC_FX;
} // endif rc
@@ -183,7 +520,7 @@ int ZIPUTIL::findEntry(PGLOBAL g, bool next)
/***********************************************************************/
/* Get the next used entry. */
/***********************************************************************/
-int ZIPUTIL::nextEntry(PGLOBAL g)
+int UNZIPUTL::nextEntry(PGLOBAL g)
{
if (multiple) {
int rc;
@@ -206,7 +543,7 @@ int ZIPUTIL::nextEntry(PGLOBAL g)
/***********************************************************************/
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
/***********************************************************************/
-bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn)
+bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn)
{
/*********************************************************************/
/* The file will be decompressed into virtual memory. */
@@ -261,14 +598,14 @@ bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn)
fp->Memory = memory;
fp->Mode = mode;
fp->File = this;
- fp->Handle = NULL;
+ fp->Handle = 0;
} // endif fp
} else
return true;
} else {
- strcpy(g->Message, "Only READ mode supported for ZIP files");
+ strcpy(g->Message, "Only READ mode supported for ZIPPED tables");
return true;
} // endif mode
@@ -278,7 +615,7 @@ bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn)
/***********************************************************************/
/* Open target in zip file. */
/***********************************************************************/
-bool ZIPUTIL::openEntry(PGLOBAL g)
+bool UNZIPUTL::openEntry(PGLOBAL g)
{
int rc;
@@ -297,7 +634,7 @@ bool ZIPUTIL::openEntry(PGLOBAL g)
memory = new char[size + 1];
if ((rc = unzReadCurrentFile(zipfile, memory, size)) < 0) {
- sprintf(g->Message, "unzReadCurrentFile rc = ", rc);
+ sprintf(g->Message, "unzReadCurrentFile rc = %d", rc);
unzCloseCurrentFile(zipfile);
free(memory);
memory = NULL;
@@ -316,7 +653,7 @@ bool ZIPUTIL::openEntry(PGLOBAL g)
/***********************************************************************/
/* Close the zip file. */
/***********************************************************************/
-void ZIPUTIL::closeEntry()
+void UNZIPUTL::closeEntry()
{
if (entryopen) {
unzCloseCurrentFile(zipfile);
@@ -330,36 +667,29 @@ void ZIPUTIL::closeEntry()
} // end of closeEntry
-/* -------------------------- class ZIPFAM --------------------------- */
+/* -------------------------- class UNZFAM --------------------------- */
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-ZIPFAM::ZIPFAM(PDOSDEF tdp) : MAPFAM(tdp)
+UNZFAM::UNZFAM(PDOSDEF tdp) : MAPFAM(tdp)
{
zutp = NULL;
target = tdp->GetEntry();
mul = tdp->GetMul();
-} // end of ZIPFAM standard constructor
-
-ZIPFAM::ZIPFAM(PZIPFAM txfp) : MAPFAM(txfp)
-{
- zutp = txfp->zutp;
- target = txfp->target;
- mul = txfp->mul;
-} // end of ZIPFAM copy constructor
+} // end of UNZFAM standard constructor
-ZIPFAM::ZIPFAM(PDOSDEF tdp, PZPXFAM txfp) : MAPFAM(tdp)
+UNZFAM::UNZFAM(PUNZFAM txfp) : MAPFAM(txfp)
{
zutp = txfp->zutp;
target = txfp->target;
mul = txfp->mul;
-} // end of ZIPFAM constructor used in ResetTableOpt
+} // end of UNZFAM copy constructor
/***********************************************************************/
/* ZIP GetFileLength: returns file size in number of bytes. */
/***********************************************************************/
-int ZIPFAM::GetFileLength(PGLOBAL g)
+int UNZFAM::GetFileLength(PGLOBAL g)
{
int len = (zutp && zutp->entryopen) ? Top - Memory
: TXTFAM::GetFileLength(g) * 3;
@@ -373,7 +703,7 @@ int ZIPFAM::GetFileLength(PGLOBAL g)
/***********************************************************************/
/* ZIP Cardinality: return the number of rows if possible. */
/***********************************************************************/
-int ZIPFAM::Cardinality(PGLOBAL g)
+int UNZFAM::Cardinality(PGLOBAL g)
{
if (!g)
return 1;
@@ -388,7 +718,7 @@ int ZIPFAM::Cardinality(PGLOBAL g)
/***********************************************************************/
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
/***********************************************************************/
-bool ZIPFAM::OpenTableFile(PGLOBAL g)
+bool UNZFAM::OpenTableFile(PGLOBAL g)
{
char filename[_MAX_PATH];
MODE mode = Tdbp->GetMode();
@@ -396,7 +726,7 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g)
/*********************************************************************/
/* Allocate the ZIP utility class. */
/*********************************************************************/
- zutp = new(g) ZIPUTIL(target, mul);
+ zutp = new(g) UNZIPUTL(target, mul);
// We used the file name relative to recorded datapath
PlugSetPath(filename, To_File, Tdbp->GetPath());
@@ -415,7 +745,7 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g)
/***********************************************************************/
/* GetNext: go to next entry. */
/***********************************************************************/
-int ZIPFAM::GetNext(PGLOBAL g)
+int UNZFAM::GetNext(PGLOBAL g)
{
int rc = zutp->nextEntry(g);
@@ -431,7 +761,7 @@ int ZIPFAM::GetNext(PGLOBAL g)
/***********************************************************************/
/* ReadBuffer: Read one line for a ZIP file. */
/***********************************************************************/
-int ZIPFAM::ReadBuffer(PGLOBAL g)
+int UNZFAM::ReadBuffer(PGLOBAL g)
{
int rc, len;
@@ -497,37 +827,37 @@ int ZIPFAM::ReadBuffer(PGLOBAL g)
/***********************************************************************/
/* Table file close routine for MAP access method. */
/***********************************************************************/
-void ZIPFAM::CloseTableFile(PGLOBAL g, bool)
+void UNZFAM::CloseTableFile(PGLOBAL g, bool)
{
close();
} // end of CloseTableFile
#endif // 0
-/* -------------------------- class ZPXFAM --------------------------- */
+/* -------------------------- class UZXFAM --------------------------- */
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-ZPXFAM::ZPXFAM(PDOSDEF tdp) : MPXFAM(tdp)
+UZXFAM::UZXFAM(PDOSDEF tdp) : MPXFAM(tdp)
{
zutp = NULL;
target = tdp->GetEntry();
mul = tdp->GetMul();
//Lrecl = tdp->GetLrecl();
-} // end of ZPXFAM standard constructor
+} // end of UZXFAM standard constructor
-ZPXFAM::ZPXFAM(PZPXFAM txfp) : MPXFAM(txfp)
+UZXFAM::UZXFAM(PUZXFAM txfp) : MPXFAM(txfp)
{
zutp = txfp->zutp;
target = txfp->target;
mul = txfp->mul;
//Lrecl = txfp->Lrecl;
-} // end of ZPXFAM copy constructor
+} // end of UZXFAM copy constructor
/***********************************************************************/
/* ZIP GetFileLength: returns file size in number of bytes. */
/***********************************************************************/
-int ZPXFAM::GetFileLength(PGLOBAL g)
+int UZXFAM::GetFileLength(PGLOBAL g)
{
int len;
@@ -545,7 +875,7 @@ int ZPXFAM::GetFileLength(PGLOBAL g)
/***********************************************************************/
/* ZIP Cardinality: return the number of rows if possible. */
/***********************************************************************/
-int ZPXFAM::Cardinality(PGLOBAL g)
+int UZXFAM::Cardinality(PGLOBAL g)
{
if (!g)
return 1;
@@ -566,7 +896,7 @@ int ZPXFAM::Cardinality(PGLOBAL g)
/***********************************************************************/
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
/***********************************************************************/
-bool ZPXFAM::OpenTableFile(PGLOBAL g)
+bool UZXFAM::OpenTableFile(PGLOBAL g)
{
// May have been already opened in GetFileLength
if (!zutp || !zutp->zipfile) {
@@ -577,7 +907,7 @@ bool ZPXFAM::OpenTableFile(PGLOBAL g)
/* Allocate the ZIP utility class. */
/*********************************************************************/
if (!zutp)
- zutp = new(g)ZIPUTIL(target, mul);
+ zutp = new(g)UNZIPUTL(target, mul);
// We used the file name relative to recorded datapath
PlugSetPath(filename, To_File, Tdbp->GetPath());
@@ -600,7 +930,7 @@ bool ZPXFAM::OpenTableFile(PGLOBAL g)
/***********************************************************************/
/* GetNext: go to next entry. */
/***********************************************************************/
-int ZPXFAM::GetNext(PGLOBAL g)
+int UZXFAM::GetNext(PGLOBAL g)
{
int rc = zutp->nextEntry(g);
@@ -620,3 +950,146 @@ int ZPXFAM::GetNext(PGLOBAL g)
return RC_OK;
} // end of GetNext
+/* -------------------------- class ZIPFAM --------------------------- */
+
+/***********************************************************************/
+/* Constructor. */
+/***********************************************************************/
+ZIPFAM::ZIPFAM(PDOSDEF tdp) : DOSFAM(tdp)
+{
+ zutp = NULL;
+ target = tdp->GetEntry();
+ append = tdp->GetAppend();
+} // end of ZIPFAM standard constructor
+
+/***********************************************************************/
+/*  OpenTableFile: Open the ZIP file that will receive the table file. */
+/***********************************************************************/
+bool ZIPFAM::OpenTableFile(PGLOBAL g)
+{
+ char filename[_MAX_PATH];
+ MODE mode = Tdbp->GetMode();
+
+ /*********************************************************************/
+ /* Allocate the ZIP utility class. */
+ /*********************************************************************/
+ zutp = new(g) ZIPUTIL(target);
+
+ // We use the file name relative to the recorded datapath
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+
+ if (!zutp->OpenTable(g, mode, filename, append)) {
+ To_Fb = zutp->fp; // Useful when closing
+ } else
+ return true;
+
+ return AllocateBuffer(g);
+} // end of OpenTableFile
+
+/***********************************************************************/
+/* ReadBuffer: Read one line for a ZIP file. */
+/***********************************************************************/
+int ZIPFAM::ReadBuffer(PGLOBAL g)
+{
+ strcpy(g->Message, "ReadBuffer should not be called when zipping");
+ return RC_FX;
+} // end of ReadBuffer
+
+/***********************************************************************/
+/* WriteBuffer: Deflate the buffer to the zip file. */
+/***********************************************************************/
+int ZIPFAM::WriteBuffer(PGLOBAL g)
+{
+ int len;
+
+ // Prepare to write the new line
+ strcat(strcpy(To_Buf, Tdbp->GetLine()), (Bin) ? CrLf : "\n");
+ len = strchr(To_Buf, '\n') - To_Buf + 1;
+ return zutp->writeEntry(g, To_Buf, len);
+} // end of WriteBuffer
+
+/***********************************************************************/
+/* Table file close routine for ZIP access method. */
+/***********************************************************************/
+void ZIPFAM::CloseTableFile(PGLOBAL g, bool)
+{
+ To_Fb->Count = 0;
+ zutp->close();
+} // end of CloseTableFile
+
+/* -------------------------- class ZPXFAM --------------------------- */
+
+/***********************************************************************/
+/* Constructor. */
+/***********************************************************************/
+ZPXFAM::ZPXFAM(PDOSDEF tdp) : FIXFAM(tdp)
+{
+ zutp = NULL;
+ target = tdp->GetEntry();
+ append = tdp->GetAppend();
+ //Lrecl = tdp->GetLrecl();
+} // end of ZPXFAM standard constructor
+
+/***********************************************************************/
+/*  OpenTableFile: Open the ZIP file that will receive the table file. */
+/***********************************************************************/
+bool ZPXFAM::OpenTableFile(PGLOBAL g)
+{
+ char filename[_MAX_PATH];
+ MODE mode = Tdbp->GetMode();
+
+ /*********************************************************************/
+ /* Allocate the ZIP utility class. */
+ /*********************************************************************/
+ zutp = new(g) ZIPUTIL(target);
+
+ // We use the file name relative to the recorded datapath
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+
+ if (!zutp->OpenTable(g, mode, filename, append)) {
+ To_Fb = zutp->fp; // Useful when closing
+ } else
+ return true;
+
+ return AllocateBuffer(g);
+} // end of OpenTableFile
+
+/***********************************************************************/
+/* WriteBuffer: Deflate the buffer to the zip file. */
+/***********************************************************************/
+int ZPXFAM::WriteBuffer(PGLOBAL g)
+{
+ /*********************************************************************/
+ /* In Insert mode, we write only full blocks. */
+ /*********************************************************************/
+ if (++CurNum != Rbuf) {
+ Tdbp->IncLine(Lrecl); // Used by DOSCOL functions
+ return RC_OK;
+ } // endif CurNum
+
+ // Now start the compression process.
+ if (zutp->writeEntry(g, To_Buf, Lrecl * Rbuf) != RC_OK) {
+ Closing = true;
+ return RC_FX;
+ } // endif writeEntry
+
+ CurBlk++;
+ CurNum = 0;
+ Tdbp->SetLine(To_Buf);
+ return RC_OK;
+} // end of WriteBuffer
+
+/***********************************************************************/
+/* Table file close routine for ZIP access method. */
+/***********************************************************************/
+void ZPXFAM::CloseTableFile(PGLOBAL g, bool)
+{
+ if (CurNum && !Closing) {
+ // Some more inserted lines remain to be written
+ Rbuf = CurNum--;
+ WriteBuffer(g);
+ } // endif CurNum
+
+ To_Fb->Count = 0;
+ zutp->close();
+} // end of CloseTableFile
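For readers unfamiliar with minizip, the insert path implemented above by ZIPUTIL (open, addEntry, writeEntry, closeEntry, close) boils down to the zip.h call sequence sketched below. This is only an illustration with hypothetical file, entry and function names, not code from the patch:

  #include <string.h>
  #include "zip.h"                               // minizip deflate API, as in filamzip.h

  // Minimal sketch: create a zip file and deflate one text entry into it.
  static bool zip_one_line(const char *zipname, const char *entry, const char *line)
  {
    zipFile zf = zipOpen64(zipname, APPEND_STATUS_CREATE);     // ZIPUTIL::open
    if (!zf)
      return true;                               // open error (true means error here)

    zip_fileinfo zi = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };           // entry header, date left unset
    bool err = zipOpenNewFileInZip(zf, entry, &zi, NULL, 0, NULL, 0, NULL,
                                   Z_DEFLATED, Z_DEFAULT_COMPRESSION) != ZIP_OK;

    if (!err) {                                  // ZIPUTIL::addEntry succeeded
      err = zipWriteInFileInZip(zf, line, (unsigned)strlen(line)) < 0;  // writeEntry
      zipCloseFileInZip(zf);                     // closeEntry
    } // endif err

    zipClose(zf, NULL);                          // ZIPUTIL::close
    return err;
  } // end of zip_one_line

ZIPFAM::WriteBuffer and ZPXFAM::WriteBuffer differ only in what they hand to writeEntry: one terminated text line versus a full block of Lrecl * Rbuf fixed-length records.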
diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h
index 9312fb2f70e..3160703bd20 100644
--- a/storage/connect/filamzip.h
+++ b/storage/connect/filamzip.h
@@ -1,7 +1,7 @@
/************** filamzip H Declares Source Code File (.H) **************/
-/* Name: filamzip.h Version 1.0 */
+/* Name: filamzip.h Version 1.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
/* This file contains the ZIP file access method classes declares. */
/***********************************************************************/
@@ -10,10 +10,14 @@
#include "block.h"
#include "filamap.h"
+#include "filamfix.h"
+#include "zip.h"
#include "unzip.h"
#define DLLEXPORT extern "C"
+typedef class UNZFAM *PUNZFAM;
+typedef class UZXFAM *PUZXFAM;
typedef class ZIPFAM *PZIPFAM;
typedef class ZPXFAM *PZPXFAM;
@@ -21,16 +25,50 @@ typedef class ZPXFAM *PZPXFAM;
/* This is the ZIP utility fonctions class. */
/***********************************************************************/
class DllExport ZIPUTIL : public BLOCK {
-public:
+ public:
// Constructor
- ZIPUTIL(PSZ tgt, bool mul);
-//ZIPUTIL(ZIPUTIL *zutp);
+ ZIPUTIL(PSZ tgt);
+ //ZIPUTIL(ZIPUTIL *zutp);
// Implementation
-//PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)ZIPFAM(this); }
+ //PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); }
// Methods
- virtual bool OpenTable(PGLOBAL g, MODE mode, char *fn);
+ bool OpenTable(PGLOBAL g, MODE mode, char *fn, bool append);
+ bool open(PGLOBAL g, char *fn, bool append);
+ bool addEntry(PGLOBAL g, char *entry);
+ void close(void);
+ void closeEntry(void);
+ int writeEntry(PGLOBAL g, char *buf, int len);
+ void getTime(tm_zip& tmZip);
+
+ // Members
+ zipFile zipfile; // The ZIP container file
+ PSZ target; // The target file name
+//unz_file_info finfo; // The current file info
+ PFBLOCK fp;
+//char *memory;
+//uint size;
+//int multiple; // Multiple targets
+ bool entryopen; // True when open current entry
+//char fn[FILENAME_MAX]; // The current entry file name
+//char mapCaseTable[256];
+}; // end of ZIPUTIL
+
+/***********************************************************************/
+/*  This is the unZIP utility functions class.                        */
+/***********************************************************************/
+class DllExport UNZIPUTL : public BLOCK {
+ public:
+ // Constructor
+ UNZIPUTL(PSZ tgt, bool mul);
+//UNZIPUTL(UNZIPUTL *zutp);
+
+ // Implementation
+//PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); }
+
+ // Methods
+ bool OpenTable(PGLOBAL g, MODE mode, char *fn);
bool open(PGLOBAL g, char *fn);
bool openEntry(PGLOBAL g);
void close(void);
@@ -50,68 +88,120 @@ public:
bool entryopen; // True when open current entry
char fn[FILENAME_MAX]; // The current entry file name
char mapCaseTable[256];
-}; // end of ZIPFAM
+}; // end of UNZIPUTL
/***********************************************************************/
-/* This is the ZIP file access method. */
+/* This is the unzip file access method. */
/***********************************************************************/
-class DllExport ZIPFAM : public MAPFAM {
- friend class ZPXFAM;
-public:
+class DllExport UNZFAM : public MAPFAM {
+//friend class UZXFAM;
+ public:
// Constructors
- ZIPFAM(PDOSDEF tdp);
- ZIPFAM(PZIPFAM txfp);
- ZIPFAM(PDOSDEF tdp, PZPXFAM txfp);
+ UNZFAM(PDOSDEF tdp);
+ UNZFAM(PUNZFAM txfp);
// Implementation
- virtual AMT GetAmType(void) { return TYPE_AM_ZIP; }
- virtual PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)ZIPFAM(this); }
+ virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+ virtual PTXF Duplicate(PGLOBAL g) {return (PTXF) new(g) UNZFAM(this);}
// Methods
virtual int Cardinality(PGLOBAL g);
virtual int GetFileLength(PGLOBAL g);
-//virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
+ //virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
virtual bool OpenTableFile(PGLOBAL g);
virtual bool DeferReading(void) { return false; }
virtual int GetNext(PGLOBAL g);
-//virtual int ReadBuffer(PGLOBAL g);
-//virtual int WriteBuffer(PGLOBAL g);
-//virtual int DeleteRecords(PGLOBAL g, int irc);
-//virtual void CloseTableFile(PGLOBAL g, bool abort);
+ //virtual int ReadBuffer(PGLOBAL g);
+ //virtual int WriteBuffer(PGLOBAL g);
+ //virtual int DeleteRecords(PGLOBAL g, int irc);
+ //virtual void CloseTableFile(PGLOBAL g, bool abort);
-protected:
+ protected:
// Members
- ZIPUTIL *zutp;
- PSZ target;
- bool mul;
-}; // end of ZIPFAM
+ UNZIPUTL *zutp;
+ PSZ target;
+ bool mul;
+}; // end of UNZFAM
/***********************************************************************/
-/* This is the fixed ZIP file access method. */
+/* This is the fixed unzip file access method. */
/***********************************************************************/
-class DllExport ZPXFAM : public MPXFAM {
- friend class ZIPFAM;
-public:
+class DllExport UZXFAM : public MPXFAM {
+//friend class UNZFAM;
+ public:
// Constructors
- ZPXFAM(PDOSDEF tdp);
- ZPXFAM(PZPXFAM txfp);
+ UZXFAM(PDOSDEF tdp);
+ UZXFAM(PUZXFAM txfp);
// Implementation
virtual AMT GetAmType(void) { return TYPE_AM_ZIP; }
- virtual PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)ZPXFAM(this); }
+ virtual PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UZXFAM(this); }
// Methods
virtual int GetFileLength(PGLOBAL g);
virtual int Cardinality(PGLOBAL g);
virtual bool OpenTableFile(PGLOBAL g);
virtual int GetNext(PGLOBAL g);
-//virtual int ReadBuffer(PGLOBAL g);
+ //virtual int ReadBuffer(PGLOBAL g);
+
+ protected:
+ // Members
+ UNZIPUTL *zutp;
+ PSZ target;
+ bool mul;
+}; // end of UZXFAM
+
+/***********************************************************************/
+/* This is the zip file access method. */
+/***********************************************************************/
+class DllExport ZIPFAM : public DOSFAM {
+ public:
+ // Constructors
+ ZIPFAM(PDOSDEF tdp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+
+ // Methods
+ virtual int Cardinality(PGLOBAL g) {return 0;}
+ virtual int GetFileLength(PGLOBAL g) {return g ? 0 : 1;}
+ //virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
+ virtual bool OpenTableFile(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ //virtual int DeleteRecords(PGLOBAL g, int irc);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
+
+ protected:
+ // Members
+ ZIPUTIL *zutp;
+ PSZ target;
+ bool append;
+}; // end of ZIPFAM
+
+/***********************************************************************/
+/* This is the fixed zip file access method. */
+/***********************************************************************/
+class DllExport ZPXFAM : public FIXFAM {
+ public:
+ // Constructors
+ ZPXFAM(PDOSDEF tdp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+
+ // Methods
+ virtual int Cardinality(PGLOBAL g) {return 0;}
+ virtual int GetFileLength(PGLOBAL g) {return g ? 0 : 1;}
+ virtual bool OpenTableFile(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
-protected:
+ protected:
// Members
ZIPUTIL *zutp;
PSZ target;
- bool mul;
+ bool append;
}; // end of ZPXFAM
#endif // __FILAMZIP_H
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index 33f02d8338c..d1ab18f52d5 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) Olivier Bertrand 2004 - 2016
+/* Copyright (C) Olivier Bertrand 2004 - 2017
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -125,6 +125,8 @@
#endif // UNIX
#include "global.h"
#include "plgdbsem.h"
+#include "xtable.h"
+#include "tabext.h"
#if defined(ODBC_SUPPORT)
#include "odbccat.h"
#endif // ODBC_SUPPORT
@@ -132,12 +134,11 @@
#include "tabjdbc.h"
#include "jdbconn.h"
#endif // JDBC_SUPPORT
-#include "xtable.h"
#include "tabmysql.h"
#include "filamdbf.h"
#include "tabxcl.h"
#include "tabfmt.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "tabcol.h"
#include "xindex.h"
#if defined(__WIN__)
@@ -171,9 +172,9 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
- char version[]= "Version 1.05.0001 December 13, 2016";
+ char version[]= "Version 1.05.0003 February 27, 2017";
#if defined(__WIN__)
- char compver[]= "Version 1.05.0001 " __DATE__ " " __TIME__;
+ char compver[]= "Version 1.05.0003 " __DATE__ " " __TIME__;
char slash= '\\';
#else // !__WIN__
char slash= '/';
@@ -214,6 +215,7 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v);
void PushWarning(PGLOBAL g, THD *thd, int level);
bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host,
const char *db, char *tab, const char *src, int port);
+bool ZipLoadFile(PGLOBAL, char*, char*, char*, bool, bool);
bool ExactInfo(void);
USETEMP UseTemp(void);
int GetConvSize(void);
@@ -556,7 +558,7 @@ ha_create_table_option connect_index_option_list[]=
/***********************************************************************/
/* Push G->Message as a MySQL warning. */
/***********************************************************************/
-bool PushWarning(PGLOBAL g, PTDBASE tdbp, int level)
+bool PushWarning(PGLOBAL g, PTDB tdbp, int level)
{
PHC phc;
THD *thd;
@@ -1024,7 +1026,7 @@ char *GetListOption(PGLOBAL g, const char *opname,
char key[16], val[256];
char *pk, *pv, *pn;
- char *opval= (char*) def;
+ char *opval= (char*)def;
int n;
for (pk= (char*)oplist; pk; pk= ++pn) {
@@ -1032,26 +1034,17 @@ char *GetListOption(PGLOBAL g, const char *opname,
pv= strchr(pk, '=');
if (pv && (!pn || pv < pn)) {
- n= pv - pk;
+ n= MY_MIN(pv - pk, sizeof(key) - 1);
memcpy(key, pk, n);
key[n]= 0;
pv++;
-
- if (pn) {
- n= pn - pv;
- memcpy(val, pv, n);
- val[n]= 0;
- } else
- strcpy(val, pv);
-
+ n= MY_MIN((pn ? pn - pv : strlen(pv)), sizeof(val) - 1);
+ memcpy(val, pv, n);
+ val[n]= 0;
} else {
- if (pn) {
- n= MY_MIN(pn - pk, 15);
- memcpy(key, pk, n);
- key[n]= 0;
- } else
- strcpy(key, pk);
-
+ n= MY_MIN((pn ? pn - pk : strlen(pk)), sizeof(key) - 1);
+ memcpy(key, pk, n);
+ key[n]= 0;
val[0]= 0;
} // endif pv
@@ -1105,7 +1098,7 @@ char *GetStringTableOption(PGLOBAL g, PTOS options, char *opname, char *sdef)
else if (!stricmp(opname, "Data_charset"))
opval= options->data_charset;
- if (!opval && options && options->oplist)
+ if (!opval && options->oplist)
opval= GetListOption(g, opname, options->oplist);
return opval ? (char*)opval : sdef;
@@ -2113,7 +2106,7 @@ int ha_connect::ScanRecord(PGLOBAL g, uchar *)
PCOL colp;
PVAL value, sdvalin;
Field *fp;
- PTDBASE tp= (PTDBASE)tdbp;
+//PTDBASE tp= (PTDBASE)tdbp;
String attribute(attr_buffer, sizeof(attr_buffer),
table->s->table_charset);
my_bitmap_map *bmap= dbug_tmp_use_all_columns(table, table->read_set);
@@ -2132,7 +2125,7 @@ int ha_connect::ScanRecord(PGLOBAL g, uchar *)
&& tdbp->GetAmType() != TYPE_AM_ODBC
&& tdbp->GetAmType() != TYPE_AM_JDBC) ||
bitmap_is_set(table->write_set, fp->field_index)) {
- for (colp= tp->GetSetCols(); colp; colp= colp->GetNext())
+ for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext())
if (!stricmp(colp->GetName(), fp->field_name))
break;
@@ -2219,7 +2212,7 @@ int ha_connect::ScanRecord(PGLOBAL g, uchar *)
} else if (xmod == MODE_UPDATE) {
PCOL cp;
- for (cp= tp->GetColumns(); cp; cp= cp->GetNext())
+ for (cp= tdbp->GetColumns(); cp; cp= cp->GetNext())
if (!stricmp(colp->GetName(), cp->GetName()))
break;
@@ -2686,7 +2679,8 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
{
AMT tty = filp->Type;
char *body= filp->Body;
- unsigned int i;
+ char *havg= filp->Having;
+ unsigned int i;
bool ismul= false, x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC);
bool nonul= ((tty == TYPE_AM_ODBC || tty == TYPE_AM_JDBC) &&
(tdbp->GetMode() == MODE_INSERT || tdbp->GetMode() == MODE_DELETE));
@@ -2699,7 +2693,8 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
htrc("Cond type=%d\n", cond->type());
if (cond->type() == COND::COND_ITEM) {
- char *p1, *p2;
+ char *pb0, *pb1, *pb2, *ph0, *ph1, *ph2;
+ bool bb = false, bh = false;
Item_cond *cond_item= (Item_cond *)cond;
if (x)
@@ -2719,38 +2714,78 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
List_iterator<Item> li(*arglist);
const Item *subitem;
- p1= body + strlen(body);
- strcpy(p1, "(");
- p2= p1 + 1;
+ pb0= pb1= body + strlen(body);
+ strcpy(pb0, "(");
+ pb2= pb1 + 1;
+
+ if (havg) {
+ ph0= ph1= havg + strlen(havg);
+ strcpy(ph0, "(");
+ ph2= ph1 + 1;
+ } // endif havg
for (i= 0; i < arglist->elements; i++)
if ((subitem= li++)) {
if (!CheckCond(g, filp, subitem)) {
if (vop == OP_OR || nonul)
return NULL;
- else
- *p2= 0;
+ else {
+ *pb2= 0;
+ if (havg) *ph2= 0;
+ } // endelse
} else {
- p1= p2 + strlen(p2);
- strcpy(p1, GetValStr(vop, false));
- p2= p1 + strlen(p1);
+ if (filp->Bd) {
+ pb1= pb2 + strlen(pb2);
+ strcpy(pb1, GetValStr(vop, false));
+ pb2= pb1 + strlen(pb1);
+ } // endif Bd
+
+ if (filp->Hv) {
+ ph1= ph2 + strlen(ph2);
+ strcpy(ph1, GetValStr(vop, false));
+ ph2= ph1 + strlen(ph1);
+ } // endif Hv
+
} // endif CheckCond
+ bb |= filp->Bd;
+ bh |= filp->Hv;
+ filp->Bd = filp->Hv = false;
} else
return NULL;
- if (*p1 != '(')
- strcpy(p1, ")");
- else
- return NULL;
+ if (bb) {
+ strcpy(pb1, ")");
+ filp->Bd = bb;
+ } else
+ *pb0= 0;
+
+ if (havg) {
+ if (bb && bh && vop == OP_OR) {
+ // A where clause cannot be OR'ed with a having clause
+ bb= bh= 0;
+ *pb0 = 0;
+ *ph0 = 0;
+ } else if (bh) {
+ strcpy(ph1, ")");
+ filp->Hv= bh;
+ } else
+ *ph0 = 0;
+
+ } // endif havg
+
+ if (!bb && !bh)
+ return NULL;
} else if (cond->type() == COND::FUNC_ITEM) {
unsigned int i;
- bool iscol, neg= FALSE;
+ bool iscol, ishav= false, neg= false;
Item_func *condf= (Item_func *)cond;
Item* *args= condf->arguments();
+ filp->Bd = filp->Hv = false;
+
if (trace)
htrc("Func type=%d argnum=%d\n", condf->functype(),
condf->argument_count());
@@ -2799,8 +2834,9 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
ha_field_option_struct *fop;
Item_field *pField= (Item_field *)args[i];
- if (x && i)
- return NULL;
+ // IN and BETWEEN clauses should be col VOP list
+ if (i && (x || ismul))
+ return NULL;
else if (pField->field->table != table)
return NULL; // Field does not belong to this table
else if (tty != TYPE_AM_WMI && IsIndexed(pField->field))
@@ -2816,10 +2852,19 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
else
return NULL;
- } else if (tty == TYPE_AM_TBL)
- return NULL;
- else
- fnm= pField->field->field_name;
+ } else if (tty == TYPE_AM_TBL) {
+ return NULL;
+ } else {
+ bool h;
+
+ fnm = filp->Chk(pField->field->field_name, &h);
+
+ if (h && i && !ishav)
+ return NULL; // Having should be col VOP arg
+ else
+ ishav = h;
+
+ } // endif's
if (trace) {
htrc("Field index=%d\n", pField->field->field_index);
@@ -2828,11 +2873,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
htrc("Field_type=%d\n", args[i]->field_type());
} // endif trace
- // IN and BETWEEN clauses should be col VOP list
- if (i && ismul)
- return NULL;
-
- strcat(body, fnm);
+ strcat((ishav ? havg : body), fnm);
} else if (args[i]->type() == COND::FUNC_ITEM) {
if (tty == TYPE_AM_MYSQL) {
if (!CheckCond(g, filp, args[i]))
@@ -2871,32 +2912,34 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
return NULL;
if (!x) {
+ char *s = (ishav) ? havg : body;
+
// Append the value to the filter
switch (args[i]->field_type()) {
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
if (tty == TYPE_AM_ODBC) {
- strcat(body, "{ts '");
- strncat(body, res->ptr(), res->length());
+ strcat(s, "{ts '");
+ strncat(s, res->ptr(), res->length());
if (res->length() < 19)
- strcat(body, "1970-01-01 00:00:00" + res->length());
+ strcat(s, "1970-01-01 00:00:00" + res->length());
- strcat(body, "'}");
+ strcat(s, "'}");
break;
} // endif ODBC
case MYSQL_TYPE_DATE:
if (tty == TYPE_AM_ODBC) {
- strcat(body, "{d '");
- strcat(strncat(body, res->ptr(), res->length()), "'}");
+ strcat(s, "{d '");
+ strcat(strncat(s, res->ptr(), res->length()), "'}");
break;
} // endif ODBC
case MYSQL_TYPE_TIME:
if (tty == TYPE_AM_ODBC) {
- strcat(body, "{t '");
- strcat(strncat(body, res->ptr(), res->length()), "'}");
+ strcat(s, "{t '");
+ strcat(strncat(s, res->ptr(), res->length()), "'}");
break;
} // endif ODBC
@@ -2905,39 +2948,39 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
switch (args[0]->field_type()) {
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
- strcat(body, "{ts '");
- strncat(body, res->ptr(), res->length());
+ strcat(s, "{ts '");
+ strncat(s, res->ptr(), res->length());
if (res->length() < 19)
- strcat(body, "1970-01-01 00:00:00" + res->length());
+ strcat(s, "1970-01-01 00:00:00" + res->length());
- strcat(body, "'}");
+ strcat(s, "'}");
break;
case MYSQL_TYPE_DATE:
- strcat(body, "{d '");
- strncat(body, res->ptr(), res->length());
- strcat(body, "'}");
+ strcat(s, "{d '");
+ strncat(s, res->ptr(), res->length());
+ strcat(s, "'}");
break;
case MYSQL_TYPE_TIME:
- strcat(body, "{t '");
- strncat(body, res->ptr(), res->length());
- strcat(body, "'}");
+ strcat(s, "{t '");
+ strncat(s, res->ptr(), res->length());
+ strcat(s, "'}");
break;
default:
- strcat(body, "'");
- strncat(body, res->ptr(), res->length());
- strcat(body, "'");
+ strcat(s, "'");
+ strncat(s, res->ptr(), res->length());
+ strcat(s, "'");
} // endswitch field type
} else {
- strcat(body, "'");
- strncat(body, res->ptr(), res->length());
- strcat(body, "'");
+ strcat(s, "'");
+ strncat(s, res->ptr(), res->length());
+ strcat(s, "'");
} // endif tty
break;
default:
- strncat(body, res->ptr(), res->length());
+ strncat(s, res->ptr(), res->length());
} // endswitch field type
} else {
@@ -2953,22 +2996,28 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
} // endif x
- } // endif
+ } // endif's Type
if (!x) {
- if (!i)
- strcat(body, GetValStr(vop, neg));
+ char *s = (ishav) ? havg : body;
+
+ if (!i)
+ strcat(s, GetValStr(vop, neg));
else if (vop == OP_XX && i == 1)
- strcat(body, " AND ");
+ strcat(s, " AND ");
else if (vop == OP_IN)
- strcat(body, (i == condf->argument_count() - 1) ? ")" : ",");
+ strcat(s, (i == condf->argument_count() - 1) ? ")" : ",");
} // endif x
} // endfor i
- if (x)
- filp->Op= vop;
+ if (x)
+ filp->Op = vop;
+ else if (ishav)
+ filp->Hv = true;
+ else
+ filp->Bd = true;
} else {
if (trace)
@@ -3025,16 +3074,28 @@ const COND *ha_connect::cond_push(const COND *cond)
if (b) {
PCFIL filp;
+ int rc;
if ((filp= tdbp->GetCondFil()) && filp->Cond == cond &&
filp->Idx == active_index && filp->Type == tty)
goto fin; // Already done
filp= new(g) CONDFIL(cond, active_index, tty);
- filp->Body= (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0);
- *filp->Body= 0;
+ rc = filp->Init(g, this);
+
+ if (rc == RC_INFO) {
+ filp->Having = (char*)PlugSubAlloc(g, NULL, 256);
+ *filp->Having = 0;
+ } else if (rc == RC_FX)
+ goto fin;
+
+ filp->Body = (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0);
+ *filp->Body = 0;
if (CheckCond(g, filp, cond)) {
+ if (filp->Having && strlen(filp->Having) > 255)
+ goto fin; // Having clause would overflow its buffer
+
if (trace)
htrc("cond_push: %s\n", filp->Body);
@@ -3207,9 +3268,9 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*)
tdbp= GetTDB(g);
dup->Check |= CHK_OPT;
- if (tdbp) {
+ if (tdbp && !tdbp->IsRemote()) {
bool dop= IsTypeIndexable(GetRealType(NULL));
- bool dox= (((PTDBASE)tdbp)->GetDef()->Indexable() == 1);
+ bool dox= (tdbp->GetDef()->Indexable() == 1);
if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) {
if (rc == RC_INFO) {
@@ -3220,7 +3281,7 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*)
} // endif rc
- } else
+ } else if (!tdbp)
rc= HA_ERR_INTERNAL_ERROR;
return rc;
@@ -3463,9 +3524,9 @@ int ha_connect::index_init(uint idx, bool sorted)
htrc("index_init CONNECT: %s\n", g->Message);
active_index= MAX_KEY;
rc= HA_ERR_INTERNAL_ERROR;
- } else if (((PTDBDOX)tdbp)->To_Kindex) {
+ } else if (tdbp->GetKindex()) {
if (((PTDBDOX)tdbp)->To_Kindex->GetNum_K()) {
- if (((PTDBASE)tdbp)->GetFtype() != RECFM_NAF)
+ if (tdbp->GetFtype() != RECFM_NAF)
((PTDBDOX)tdbp)->GetTxfp()->ResetBuffer(g);
active_index= idx;
@@ -3879,11 +3940,10 @@ int ha_connect::rnd_next(uchar *buf)
void ha_connect::position(const uchar *)
{
DBUG_ENTER("ha_connect::position");
-//if (((PTDBASE)tdbp)->GetDef()->Indexable())
- my_store_ptr(ref, ref_length, (my_off_t)((PTDBASE)tdbp)->GetRecpos());
+ my_store_ptr(ref, ref_length, (my_off_t)tdbp->GetRecpos());
if (trace > 1)
- htrc("position: pos=%d\n", ((PTDBASE)tdbp)->GetRecpos());
+ htrc("position: pos=%d\n", tdbp->GetRecpos());
DBUG_VOID_RETURN;
} // end of position
@@ -3908,14 +3968,14 @@ void ha_connect::position(const uchar *)
int ha_connect::rnd_pos(uchar *buf, uchar *pos)
{
int rc;
- PTDBASE tp= (PTDBASE)tdbp;
+//PTDBASE tp= (PTDBASE)tdbp;
DBUG_ENTER("ha_connect::rnd_pos");
- if (!tp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) {
+ if (!tdbp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) {
if (trace)
- htrc("rnd_pos: %d\n", tp->GetRecpos());
+ htrc("rnd_pos: %d\n", tdbp->GetRecpos());
- tp->SetFilter(NULL);
+ tdbp->SetFilter(NULL);
rc= rnd_next(buf);
} else
rc= HA_ERR_KEY_NOT_FOUND;
@@ -4093,7 +4153,7 @@ int ha_connect::delete_all_rows()
if (tdbp && tdbp->GetUse() == USE_OPEN &&
tdbp->GetAmType() != TYPE_AM_XML &&
- ((PTDBASE)tdbp)->GetFtype() != RECFM_NAF)
+ tdbp->GetFtype() != RECFM_NAF)
// Close and reopen the table so it will be deleted
rc= CloseTable(g);
@@ -4471,12 +4531,12 @@ int ha_connect::external_lock(THD *thd, int lock_type)
if (!tdbp) {
if (!(tdbp= GetTDB(g)))
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
- else if (!((PTDBASE)tdbp)->GetDef()->Indexable()) {
+ else if (!tdbp->GetDef()->Indexable()) {
sprintf(g->Message, "external_lock: Table %s is not indexable", tdbp->GetName());
// DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
DBUG_RETURN(0);
- } else if (((PTDBASE)tdbp)->GetDef()->Indexable() == 1) {
+ } else if (tdbp->GetDef()->Indexable() == 1) {
bool oldsep= ((PCHK)g->Xchk)->oldsep;
bool newsep= ((PCHK)g->Xchk)->newsep;
PTDBDOS tdp= (PTDBDOS)tdbp;
@@ -4557,7 +4617,7 @@ int ha_connect::external_lock(THD *thd, int lock_type)
rc= 0;
} // endif MakeIndex
- } else if (((PTDBASE)tdbp)->GetDef()->Indexable() == 3) {
+ } else if (tdbp->GetDef()->Indexable() == 3) {
if (CheckVirtualIndex(NULL)) {
// Make it a warning to avoid crash
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -5173,7 +5233,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
TABLE_SHARE *table_s,
HA_CREATE_INFO *create_info)
{
- char v=0, spc= ',', qch= 0;
+ char v=0;
const char *fncn= "?";
const char *user, *fn, *db, *host, *pwd, *sep, *tbl, *src;
const char *col, *ocl, *rnk, *pic, *fcl, *skc, *zfn;
@@ -5225,8 +5285,6 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
fncn= topt->catfunc;
fnc= GetFuncID(fncn);
sep= topt->separator;
- spc= (!sep) ? ',' : *sep;
- qch= topt->qchar ? *topt->qchar : (signed)topt->quoted >= 0 ? '"' : 0;
mul = (int)topt->multiple;
tbl= topt->tablist;
col= topt->colist;
@@ -5426,8 +5484,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
if (mydef->GetPassword())
pwd= mydef->GetPassword();
- if (mydef->GetDatabase())
- db= mydef->GetDatabase();
+ if (mydef->GetTabschema())
+ db = mydef->GetTabschema();
if (mydef->GetTabname())
tab= mydef->GetTabname();
@@ -6053,8 +6111,8 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (mydef->GetHostname())
host= mydef->GetHostname();
- if (mydef->GetDatabase())
- db= mydef->GetDatabase();
+ if (mydef->GetTabschema())
+ db = mydef->GetTabschema();
if (mydef->GetTabname())
tab= mydef->GetTabname();
@@ -6267,21 +6325,26 @@ int ha_connect::create(const char *name, TABLE *table_arg,
// Check for incompatible options
if (options->sepindex) {
my_message(ER_UNKNOWN_ERROR,
- "SEPINDEX is incompatible with unspecified file name",
- MYF(0));
+ "SEPINDEX is incompatible with unspecified file name", MYF(0));
DBUG_RETURN(HA_ERR_UNSUPPORTED);
- } else if (GetTypeID(options->type) == TAB_VEC)
- if (!table->s->max_rows || options->split) {
- my_printf_error(ER_UNKNOWN_ERROR,
- "%s tables whose file name is unspecified cannot be split",
- MYF(0), options->type);
- DBUG_RETURN(HA_ERR_UNSUPPORTED);
- } else if (options->header == 2) {
- my_printf_error(ER_UNKNOWN_ERROR,
- "header=2 is not allowed for %s tables whose file name is unspecified",
- MYF(0), options->type);
- DBUG_RETURN(HA_ERR_UNSUPPORTED);
- } // endif's
+ } else if (GetTypeID(options->type) == TAB_VEC) {
+ if (!table->s->max_rows || options->split) {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "%s tables whose file name is unspecified cannot be split",
+ MYF(0), options->type);
+ DBUG_RETURN(HA_ERR_UNSUPPORTED);
+ } else if (options->header == 2) {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "header=2 is not allowed for %s tables whose file name is unspecified",
+ MYF(0), options->type);
+ DBUG_RETURN(HA_ERR_UNSUPPORTED);
+ } // endif's
+
+ } else if (options->zipped) {
+ my_message(ER_UNKNOWN_ERROR,
+ "ZIPPED is incompatible with unspecified file name", MYF(0));
+ DBUG_RETURN(HA_ERR_UNSUPPORTED);
+ } // endif's options
// Fold type to lower case
for (int i= 0; i < 12; i++)
@@ -6334,6 +6397,36 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (trace)
htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas);
+ if (options->zipped) {
+ // Check whether the zip entry must be made from a file
+ char *fn = GetListOption(g, "Load", options->oplist, NULL);
+
+ if (fn) {
+ char zbuf[_MAX_PATH], buf[_MAX_PATH], dbpath[_MAX_PATH];
+ char *entry = GetListOption(g, "Entry", options->oplist, NULL);
+ char *a = GetListOption(g, "Append", options->oplist, "NO");
+ bool append = *a == '1' || *a == 'Y' || *a == 'y' || !stricmp(a, "ON");
+ char *m = GetListOption(g, "Mulentries", options->oplist, "NO");
+ bool mul = *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON");
+
+ if (!entry && !mul) {
+ my_message(ER_UNKNOWN_ERROR, "Missing entry name", MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ } // endif entry
+
+ strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/");
+ PlugSetPath(zbuf, options->filename, dbpath);
+ PlugSetPath(buf, fn, dbpath);
+
+ if (ZipLoadFile(g, zbuf, buf, entry, append, mul)) {
+ my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ } // endif LoadFile
+
+ } // endif fn
+
+ } // endif zipped
+
// To check whether indexes have to be made or remade
if (!g->Xchk) {
PIXDEF xdp;
@@ -6952,10 +7045,10 @@ maria_declare_plugin(connect)
PLUGIN_LICENSE_GPL,
connect_init_func, /* Plugin Init */
connect_done_func, /* Plugin Deinit */
- 0x0104, /* version number (1.04) */
+ 0x0105, /* version number (1.05) */
NULL, /* status variables */
connect_system_variables, /* system variables */
- "1.05.0001", /* string version */
+ "1.05.0003", /* string version */
MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */
}
maria_declare_plugin_end;
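To make the new zipped branch of ha_connect::create() concrete, the sketch below shows roughly what it amounts to for a hypothetical table whose option list carries Load=data.csv and Entry=tab1, with a file name resolving to tab1.zip in database test. All names are illustrative, and g is the usual PGLOBAL:

  // Hypothetical values produced by the option parsing in ha_connect::create()
  char  zbuf[_MAX_PATH], buf[_MAX_PATH], dbpath[_MAX_PATH];
  char *fn = (char*)"data.csv";            // Load=...
  char *entry = (char*)"tab1";             // Entry=...
  bool  append = false, mul = false;       // Append/Mulentries default to NO

  strcat(strcat(strcpy(dbpath, "./"), "test"), "/");   // "./test/"
  PlugSetPath(zbuf, "tab1.zip", dbpath);               // resolved zip file name
  PlugSetPath(buf, fn, dbpath);                        // resolved source file name

  // Deflate ./test/data.csv into the "tab1" entry of ./test/tab1.zip
  if (ZipLoadFile(g, zbuf, buf, entry, append, mul))
    my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));

The Append and Mulentries options simply map to the last two boolean arguments of ZipLoadFile.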
diff --git a/storage/connect/ioapi.c b/storage/connect/ioapi.c
index 7f5c191b2af..a49da91f7f0 100644
--- a/storage/connect/ioapi.c
+++ b/storage/connect/ioapi.c
@@ -27,6 +27,7 @@
#include "ioapi.h"
+#include "my_attribute.h"
voidpf call_zopen64 (const zlib_filefunc64_32_def* pfilefunc,const void*filename,int mode)
{
@@ -92,7 +93,7 @@ static long ZCALLBACK fseek64_file_func OF((voidpf opaque, voidpf stream, ZPO
static int ZCALLBACK fclose_file_func OF((voidpf opaque, voidpf stream));
static int ZCALLBACK ferror_file_func OF((voidpf opaque, voidpf stream));
-static voidpf ZCALLBACK fopen_file_func (voidpf opaque, const char* filename, int mode)
+static voidpf ZCALLBACK fopen_file_func (voidpf opaque __attribute__((unused)), const char* filename, int mode)
{
FILE* file = NULL;
const char* mode_fopen = NULL;
@@ -110,7 +111,7 @@ static voidpf ZCALLBACK fopen_file_func (voidpf opaque, const char* filename, in
return file;
}
-static voidpf ZCALLBACK fopen64_file_func (voidpf opaque, const void* filename, int mode)
+static voidpf ZCALLBACK fopen64_file_func (voidpf opaque __attribute__((unused)), const void* filename, int mode)
{
FILE* file = NULL;
const char* mode_fopen = NULL;
@@ -129,21 +130,21 @@ static voidpf ZCALLBACK fopen64_file_func (voidpf opaque, const void* filename,
}
-static uLong ZCALLBACK fread_file_func (voidpf opaque, voidpf stream, void* buf, uLong size)
+static uLong ZCALLBACK fread_file_func (voidpf opaque __attribute__((unused)), voidpf stream, void* buf, uLong size)
{
uLong ret;
ret = (uLong)fread(buf, 1, (size_t)size, (FILE *)stream);
return ret;
}
-static uLong ZCALLBACK fwrite_file_func (voidpf opaque, voidpf stream, const void* buf, uLong size)
+static uLong ZCALLBACK fwrite_file_func (voidpf opaque __attribute__((unused)), voidpf stream, const void* buf, uLong size)
{
uLong ret;
ret = (uLong)fwrite(buf, 1, (size_t)size, (FILE *)stream);
return ret;
}
-static long ZCALLBACK ftell_file_func (voidpf opaque, voidpf stream)
+static long ZCALLBACK ftell_file_func (voidpf opaque __attribute__((unused)), voidpf stream)
{
long ret;
ret = ftell((FILE *)stream);
@@ -151,14 +152,14 @@ static long ZCALLBACK ftell_file_func (voidpf opaque, voidpf stream)
}
-static ZPOS64_T ZCALLBACK ftell64_file_func (voidpf opaque, voidpf stream)
+static ZPOS64_T ZCALLBACK ftell64_file_func (voidpf opaque __attribute__((unused)), voidpf stream)
{
ZPOS64_T ret;
ret = FTELLO_FUNC((FILE *)stream);
return ret;
}
-static long ZCALLBACK fseek_file_func (voidpf opaque, voidpf stream, uLong offset, int origin)
+static long ZCALLBACK fseek_file_func (voidpf opaque __attribute__((unused)), voidpf stream, uLong offset, int origin)
{
int fseek_origin=0;
long ret;
@@ -181,7 +182,7 @@ static long ZCALLBACK fseek_file_func (voidpf opaque, voidpf stream, uLong offs
return ret;
}
-static long ZCALLBACK fseek64_file_func (voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)
+static long ZCALLBACK fseek64_file_func (voidpf opaque __attribute__((unused)), voidpf stream, ZPOS64_T offset, int origin)
{
int fseek_origin=0;
long ret;
@@ -207,14 +208,14 @@ static long ZCALLBACK fseek64_file_func (voidpf opaque, voidpf stream, ZPOS64_T
}
-static int ZCALLBACK fclose_file_func (voidpf opaque, voidpf stream)
+static int ZCALLBACK fclose_file_func (voidpf opaque __attribute__((unused)), voidpf stream)
{
int ret;
ret = fclose((FILE *)stream);
return ret;
}
-static int ZCALLBACK ferror_file_func (voidpf opaque, voidpf stream)
+static int ZCALLBACK ferror_file_func (voidpf opaque __attribute__((unused)), voidpf stream)
{
int ret;
ret = ferror((FILE *)stream);
diff --git a/storage/connect/ioapi.h b/storage/connect/ioapi.h
index 8dcbdb06e35..4fa73002053 100644
--- a/storage/connect/ioapi.h
+++ b/storage/connect/ioapi.h
@@ -129,8 +129,9 @@ extern "C" {
#endif
#endif
-
-
+#ifndef OF
+#define OF(args) args
+#endif
typedef voidpf (ZCALLBACK *open_file_func) OF((voidpf opaque, const char* filename, int mode));
typedef uLong (ZCALLBACK *read_file_func) OF((voidpf opaque, voidpf stream, void* buf, uLong size));
diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp
index a69f84a94a1..c1d077406b7 100644
--- a/storage/connect/jdbconn.cpp
+++ b/storage/connect/jdbconn.cpp
@@ -1,7 +1,7 @@
/************ Jdbconn C++ Functions Source Code File (.CPP) ************/
-/* Name: JDBCONN.CPP Version 1.0 */
+/* Name: JDBCONN.CPP Version 1.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
/* This file contains the JDBC connection classes functions. */
/***********************************************************************/
@@ -45,9 +45,9 @@
#include "plgdbsem.h"
#include "xobject.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabjdbc.h"
//#include "jdbconn.h"
-//#include "plgcnx.h" // For DB types
#include "resource.h"
#include "valblk.h"
#include "osutil.h"
@@ -318,13 +318,21 @@ PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, char *colpat,
/**************************************************************************/
PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp)
{
+ char *sqry;
PQRYRES qrp;
JDBConn *jcp = new(g)JDBConn(g, NULL);
if (jcp->Open(sjp))
return NULL;
- qrp = jcp->GetMetaData(g, src);
+ if (strstr(src, "%s")) {
+ // Placeholder for an optional where clause
+ sqry = (char*)PlugSubAlloc(g, NULL, strlen(src) + 2);
+ sprintf(sqry, src, "1=1"); // dummy where clause
+ } else
+ sqry = src;
+
+ qrp = jcp->GetMetaData(g, sqry);
jcp->Close();
return qrp;
} // end of JDBCSrcCols
@@ -818,6 +826,11 @@ int JDBConn::Open(PJPARM sop)
jpop->Append(GetPluginDir());
jpop->Append("JdbcInterface.jar");
+ // All wrappers are pre-compiled in JavaWrappers.jar in the plugin dir
+ jpop->Append(sep);
+ jpop->Append(GetPluginDir());
+ jpop->Append("JavaWrappers.jar");
+
//================== prepare loading of Java VM ============================
JavaVMInitArgs vm_args; // Initialization arguments
JavaVMOption* options = new JavaVMOption[N]; // JVM invocation options
@@ -1157,6 +1170,9 @@ void JDBConn::Close()
jint rc;
jmethodID did = nullptr;
+ // Could have been detached in case of join
+ rc = jvm->AttachCurrentThread((void**)&env, nullptr);
+
if (gmID(m_G, did, "JdbcDisconnect", "()I"))
printf("%s\n", Msg);
else if (Check(env->CallIntMethod(job, did)))
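The placeholder handling added to JDBCSrcCols means a source definition that embeds %s for a runtime where clause can still be probed for column metadata. A minimal illustration with a hypothetical query text:

  // src as given in a source definition, with a placeholder for the where clause
  const char *src = "select id, name from emp where %s";

  // What JDBCSrcCols now builds before asking the driver for metadata
  char *sqry = (char*)PlugSubAlloc(g, NULL, strlen(src) + 2);
  sprintf(sqry, src, "1=1");   // -> "select id, name from emp where 1=1"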
diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp
index c45630129f1..b473871e9f7 100644
--- a/storage/connect/json.cpp
+++ b/storage/connect/json.cpp
@@ -1,7 +1,7 @@
/*************** json CPP Declares Source Code File (.H) ***************/
-/* Name: json.cpp Version 1.2 */
+/* Name: json.cpp Version 1.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */
/* */
/* This file contains the JSON classes functions. */
/***********************************************************************/
@@ -27,8 +27,33 @@
#define EL "\r\n"
#else
#define EL "\n"
+#undef SE_CATCH // Does not work on Linux
#endif
+#if defined(SE_CATCH)
+/**************************************************************************/
+/* This is the support of catching C interrupts to prevent crashes. */
+/**************************************************************************/
+#include <eh.h>
+
+class SE_Exception {
+public:
+ SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {}
+ ~SE_Exception() {}
+
+ unsigned int nSE;
+ PEXCEPTION_RECORD eRec;
+}; // end of class SE_Exception
+
+void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp)
+{
+ throw SE_Exception(u, pExp->ExceptionRecord);
+} // end of trans_func
+
+char *GetExceptionDesc(PGLOBAL g, unsigned int e);
+#endif // SE_CATCH
+
+
/***********************************************************************/
/* Parse a json string. */
/* Note: when pretty is not known, the caller set pretty to 3. */
@@ -40,6 +65,9 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
PJSON jsp = NULL;
STRG src;
+ if (trace)
+ htrc("ParseJson: s=%.10s len=%d\n", s, len);
+
if (!s || !len) {
strcpy(g->Message, "Void JSON object");
return NULL;
@@ -53,15 +81,37 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
pty[0] = false;
+
// Save stack and allocation environment and prepare error return
if (g->jump_level == MAX_JUMP) {
strcpy(g->Message, MSG(TOO_MANY_JUMPS));
return NULL;
} // endif jump_level
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- goto err;
- } // endif rc
+#if defined(SE_CATCH)
+ // Let's try to recover from any kind of interrupt
+ _se_translator_function f = _set_se_translator(trans_func);
+
+ try {
+#endif // SE_CATCH --------------------- try section --------------------
+ if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) {
+ goto err;
+ } // endif rc
+
+#if defined(SE_CATCH) // ------------- end of try section -----------------
+ } catch (SE_Exception e) {
+ sprintf(g->Message, "ParseJson: exception doing setjmp: %s (rc=%u)",
+ GetExceptionDesc(g, e.nSE), e.nSE);
+ _set_se_translator(f);
+ goto err;
+ } catch (...) {
+ strcpy(g->Message, "Exception doing setjmp");
+ _set_se_translator(f);
+ goto err;
+ } // end of try-catches
+
+ _set_se_translator(f);
+#endif // SE_CATCH
for (i = 0; i < len; i++)
switch (s[i]) {
@@ -140,7 +190,7 @@ tryit:
strcpy(g->Message, "More than one item in file");
err:
- g->jump_level--;
+ g->jump_level--;
return NULL;
} // end of ParseJson
@@ -390,14 +440,14 @@ char *ParseString(PGLOBAL g, int& i, STRG& src)
// if (charset == utf8) {
char xs[5];
uint hex;
-
+
xs[0] = s[++i];
xs[1] = s[++i];
xs[2] = s[++i];
xs[3] = s[++i];
xs[4] = 0;
hex = strtoul(xs, NULL, 16);
-
+
if (hex < 0x80) {
p[n] = (uchar)hex;
} else if (hex < 0x800) {
@@ -414,7 +464,7 @@ char *ParseString(PGLOBAL g, int& i, STRG& src)
} else {
char xs[3];
UINT hex;
-
+
i += 2;
xs[0] = s[++i];
xs[1] = s[++i];
@@ -468,7 +518,7 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src)
case '.':
if (!found_digit || has_dot || has_e)
goto err;
-
+
has_dot = true;
break;
case 'e':
@@ -769,7 +819,7 @@ bool JOUTSTR::Escape(const char *s)
for (unsigned int i = 0; s[i]; i++)
switch (s[i]) {
- case '"':
+ case '"':
case '\\':
case '\t':
case '\n':
@@ -1057,7 +1107,7 @@ void JARRAY::InitArray(PGLOBAL g)
int i;
PJVAL jvp, *pjvp = &First;
- for (Size = 0, jvp = First; jvp; jvp = jvp->Next)
+ for (Size = 0, jvp = First; jvp; jvp = jvp->Next)
if (!jvp->Del)
Size++;
@@ -1191,8 +1241,8 @@ bool JARRAY::IsNull(void)
/***********************************************************************/
JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON()
{
- Jsp = NULL;
- Value = AllocateValue(g, valp);
+ Jsp = NULL;
+ Value = AllocateValue(g, valp);
Next = NULL;
Del = false;
} // end of JVALUE constructor
@@ -1297,7 +1347,7 @@ PSZ JVALUE::GetText(PGLOBAL g, PSZ text)
} // end of GetText
void JVALUE::SetValue(PJSON jsp)
-{
+{
if (jsp && jsp->GetType() == TYPE_JVAL) {
Jsp = jsp->GetJsp();
Value = jsp->GetValue();
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index f9034f25739..0f693c3c0d6 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -1,6 +1,6 @@
/****************** jsonudf C++ Program Source Code File (.CPP) ******************/
-/* PROGRAM NAME: jsonudf Version 1.4 */
-/* (C) Copyright to the author Olivier BERTRAND 2015-2016 */
+/* PROGRAM NAME: jsonudf Version 1.5 */
+/* (C) Copyright to the author Olivier BERTRAND 2015-2017 */
/* This program are the JSON User Defined Functions . */
/*********************************************************************************/
@@ -242,13 +242,16 @@ my_bool JSNX::ParseJpath(PGLOBAL g)
// Jpath = Name;
return true;
- pbuf = PlugDup(g, Jpath);
+ if (!(pbuf = PlgDBDup(g, Jpath)))
+ return true;
// The Jpath must be analyzed
for (i = 0, p = pbuf; (p = strchr(p, ':')); i++, p++)
Nod++; // One path node found
- Nodes = (PJNODE)PlugSubAlloc(g, NULL, (++Nod) * sizeof(JNODE));
+ if (!(Nodes = (PJNODE)PlgDBSubAlloc(g, NULL, (++Nod) * sizeof(JNODE))))
+ return true;
+
memset(Nodes, 0, (Nod)* sizeof(JNODE));
// Analyze the Jpath for this column
@@ -1086,9 +1089,10 @@ inline void JsonFreeMem(PGLOBAL g)
/*********************************************************************************/
static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args,
char *message, my_bool mbn,
- unsigned long reslen, unsigned long memlen)
+ unsigned long reslen, unsigned long memlen,
+ unsigned long more = 0)
{
- PGLOBAL g = PlugInit(NULL, memlen);
+ PGLOBAL g = PlugInit(NULL, memlen + more);
if (!g) {
strcpy(message, "Allocation error");
@@ -1100,6 +1104,7 @@ static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args,
} // endif g
g->Mrr = (args->arg_count && args->args[0]) ? 1 : 0;
+ g->ActivityStart = (PACTIVITY)more;
initid->maybe_null = mbn;
initid->max_length = reslen;
initid->ptr = (char*)g;
@@ -1444,6 +1449,8 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
} // endif b
+ ml += (unsigned long)g->ActivityStart; // more
+
if (ml > g->Sarea_Size) {
free(g->Sarea);
@@ -2758,7 +2765,7 @@ void json_item_merge_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool json_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more;
int n = IsJson(args, 0);
if (args->arg_count < 2) {
@@ -2767,7 +2774,7 @@ my_bool json_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else if (!n && args->arg_type[0] != STRING_RESULT) {
strcpy(message, "First argument must be a json item");
return true;
- } else if (args->arg_type[1] != STRING_RESULT) {
+ } else if (args->arg_type[1] != STRING_RESULT) {
strcpy(message, "Second argument is not a string (jpath)");
return true;
} else
@@ -2780,11 +2787,13 @@ my_bool json_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
memcpy(fn, args->args[0], args->lengths[0]);
fn[args->lengths[0]] = 0;
fl = GetFileLength(fn);
- memlen += fl * 3;
- } else if (n != 3)
- memlen += args->lengths[0] * 3;
+ more = fl * 3;
+ } else if (n != 3) {
+ more = args->lengths[0] * 3;
+ } else
+ more = 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of json_get_item_init
char *json_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -2885,7 +2894,7 @@ my_bool jsonget_string_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} // endif's
CalcLen(args, false, reslen, memlen);
- memlen += more;
+//memlen += more;
if (n == 2 && args->args[0]) {
char fn[_MAX_PATH];
@@ -2894,11 +2903,11 @@ my_bool jsonget_string_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
memcpy(fn, args->args[0], args->lengths[0]);
fn[args->lengths[0]] = 0;
fl = GetFileLength(fn);
- memlen += fl * 3;
+ more += fl * 3;
} else if (n != 3)
- memlen += args->lengths[0] * 3;
+ more += args->lengths[0] * 3;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsonget_string_init
char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -2994,7 +3003,7 @@ void jsonget_string_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jsonget_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more;
if (args->arg_count != 2) {
strcpy(message, "This function must have 2 arguments");
@@ -3008,10 +3017,10 @@ my_bool jsonget_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else
CalcLen(args, false, reslen, memlen);
- if (IsJson(args, 0) != 3)
- memlen += 1000; // TODO: calculate this
+ // TODO: calculate this
+ more = (IsJson(args, 0) != 3) ? 1000 : 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsonget_int_init
long long jsonget_int(UDF_INIT *initid, UDF_ARGS *args,
@@ -3100,7 +3109,7 @@ void jsonget_int_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jsonget_real_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more;
if (args->arg_count < 2) {
strcpy(message, "At least 2 arguments required");
@@ -3123,10 +3132,10 @@ my_bool jsonget_real_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
CalcLen(args, false, reslen, memlen);
- if (IsJson(args, 0) != 3)
- memlen += 1000; // TODO: calculate this
+ // TODO: calculate this
+ more = (IsJson(args, 0) != 3) ? 1000 : 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsonget_real_init
double jsonget_real(UDF_INIT *initid, UDF_ARGS *args,
@@ -3234,10 +3243,11 @@ my_bool jsonlocate_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
CalcLen(args, false, reslen, memlen);
- if (IsJson(args, 0) != 3)
- memlen += more; // TODO: calculate this
+ // TODO: calculate this
+ if (IsJson(args, 0) == 3)
+ more = 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsonlocate_init
char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -3358,10 +3368,11 @@ my_bool json_locate_all_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
CalcLen(args, false, reslen, memlen);
- if (IsJson(args, 0) != 3)
- memlen += more; // TODO: calculate this
+ // TODO: calculate this
+ if (IsJson(args, 0) == 3)
+ more = 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of json_locate_all_init
char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -3485,12 +3496,12 @@ my_bool jsoncontains_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} // endif's
CalcLen(args, false, reslen, memlen);
- memlen += more;
+//memlen += more;
- if (IsJson(args, 0) != 3)
- memlen += 1000; // TODO: calculate this
+ // TODO: calculate this
+ more += (IsJson(args, 0) != 3 ? 1000 : 0);
- return JsonInit(initid, args, message, false, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen, more);
} // end of jsoncontains_init
long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -3537,12 +3548,12 @@ my_bool jsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} // endif's
CalcLen(args, false, reslen, memlen);
- memlen += more;
+//memlen += more;
- if (IsJson(args, 0) != 3)
- memlen += 1000; // TODO: calculate this
+ // TODO: calculate this
+ more += (IsJson(args, 0) != 3 ? 1000 : 0);
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsoncontains_path_init
long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -3736,7 +3747,7 @@ fin:
/*********************************************************************************/
my_bool json_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more = 0;
int n = IsJson(args, 0);
if (!(args->arg_count % 2)) {
@@ -3755,11 +3766,11 @@ my_bool json_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
memcpy(fn, args->args[0], args->lengths[0]);
fn[args->lengths[0]] = 0;
fl = GetFileLength(fn);
- memlen += fl * 3;
+ more += fl * 3;
} else if (n != 3)
- memlen += args->lengths[0] * 3;
+ more += args->lengths[0] * 3;
- if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ if (!JsonInit(initid, args, message, true, reslen, memlen, more)) {
PGLOBAL g = (PGLOBAL)initid->ptr;
// This is a constant function
@@ -3954,7 +3965,7 @@ void json_file_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jfile_make_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen, more = 1024;
+ unsigned long reslen, memlen;
if (args->arg_count < 1 || args->arg_count > 3) {
strcpy(message, "Wrong number of arguments");
@@ -4993,7 +5004,7 @@ fin:
/*********************************************************************************/
my_bool jbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more = 0;
int n = IsJson(args, 0);
if (!(args->arg_count % 2)) {
@@ -5012,11 +5023,11 @@ my_bool jbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
memcpy(fn, args->args[0], args->lengths[0]);
fn[args->lengths[0]] = 0;
fl = GetFileLength(fn);
- memlen += fl * 3;
+ more = fl * 3;
} else if (n != 3)
- memlen += args->lengths[0] * 3;
+ more = args->lengths[0] * 3;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jbin_set_item_init
char *jbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -5104,8 +5115,8 @@ my_bool jbin_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
fl = GetFileLength(args->args[0]);
reslen += fl;
more += fl * M;
- memlen += more;
- return JsonInit(initid, args, message, true, reslen, memlen);
+//memlen += more;
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jbin_file_init
char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
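
Several hunks above replace ad hoc "memlen += ..." adjustments with a separate "more" amount that JsonInit now receives, adds to the initial PlugInit allocation, and stashes in g->ActivityStart so that CheckMemory can add it back when testing Sarea_Size. A minimal sketch of that round trip, using only what the hunks show (SaveMore and NeededWorkArea are hypothetical helpers, not CONNECT functions):

// Stash the extra reservation, as JsonInit does after PlugInit(NULL, memlen + more)
static void SaveMore(PGLOBAL g, unsigned long more)
{
  g->ActivityStart = (PACTIVITY)more;
} // end of SaveMore

// Recover it when re-checking the work area size, as CheckMemory does
static unsigned long NeededWorkArea(PGLOBAL g, unsigned long ml)
{
  return ml + (unsigned long)g->ActivityStart;
} // end of NeededWorkArea
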
diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc
index 497fe5e1aa8..30ac7613dd6 100644
--- a/storage/connect/mycat.cc
+++ b/storage/connect/mycat.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) Olivier Bertrand 2004 - 2016
+/* Copyright (C) Olivier Bertrand 2004 - 2017
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,9 +16,9 @@
/*************** Mycat CC Program Source Code File (.CC) ***************/
/* PROGRAM NAME: MYCAT */
/* ------------- */
-/* Version 1.5 */
+/* Version 1.6 */
/* */
-/* Author: Olivier Bertrand 2012 - 2016 */
+/* Author: Olivier Bertrand 2012 - 2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -58,9 +58,10 @@
#endif // UNIX
#include "global.h"
#include "plgdbsem.h"
-#include "reldef.h"
-#include "tabcol.h"
+//#include "reldef.h"
#include "xtable.h"
+#include "tabext.h"
+#include "tabcol.h"
#include "filamtxt.h"
#include "tabdos.h"
#include "tabfmt.h"
@@ -559,13 +560,13 @@ PRELDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am)
case TAB_XML: tdp= new(g) XMLDEF; break;
#endif // XML_SUPPORT
#if defined(VCT_SUPPORT)
- case TAB_VEC: tdp = new(g)VCTDEF; break;
+ case TAB_VEC: tdp = new(g) VCTDEF; break;
#endif // VCT_SUPPORT
#if defined(ODBC_SUPPORT)
case TAB_ODBC: tdp= new(g) ODBCDEF; break;
#endif // ODBC_SUPPORT
#if defined(JDBC_SUPPORT)
- case TAB_JDBC: tdp= new(g)JDBCDEF; break;
+ case TAB_JDBC: tdp= new(g) JDBCDEF; break;
#endif // JDBC_SUPPORT
#if defined(__WIN__)
case TAB_MAC: tdp= new(g) MACDEF; break;
diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp
index 644ca019e4a..d05254a32a6 100644
--- a/storage/connect/myconn.cpp
+++ b/storage/connect/myconn.cpp
@@ -1,11 +1,11 @@
/************** MyConn C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: MYCONN */
/* ------------- */
-/* Version 1.8 */
+/* Version 1.9 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2007-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2007-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -375,10 +375,18 @@ PQRYRES SrcColumns(PGLOBAL g, const char *host, const char *db,
if (!port)
port = mysqld_port;
- if (!strnicmp(srcdef, "select ", 7)) {
- query = (char *)PlugSubAlloc(g, NULL, strlen(srcdef) + 9);
- strcat(strcpy(query, srcdef), " LIMIT 0");
- } else
+ if (!strnicmp(srcdef, "select ", 7) || strstr(srcdef, "%s")) {
+ query = (char *)PlugSubAlloc(g, NULL, strlen(srcdef) + 10);
+
+ if (strstr(srcdef, "%s"))
+ sprintf(query, srcdef, "1=1"); // dummy where clause
+ else
+ strcpy(query, srcdef);
+
+ if (!strnicmp(srcdef, "select ", 7))
+ strcat(query, " LIMIT 0");
+
+ } else
query = (char *)srcdef;
// Open a MySQL connection for this table
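
The SrcColumns change above lets a SRCDEF contain a "%s" place holder: it is replaced by a dummy "1=1" where clause before the query is sent, and a SELECT still gets " LIMIT 0" appended so only column metadata comes back. A condensed restatement of that logic, not the actual CONNECT code (PrepareSrcQuery is a hypothetical name; the +10 sizing matches the hunk and covers both substitutions):

static char *PrepareSrcQuery(PGLOBAL g, const char *srcdef)
{
  char *query = (char *)PlugSubAlloc(g, NULL, strlen(srcdef) + 10);

  if (strstr(srcdef, "%s"))
    sprintf(query, srcdef, "1=1");     // dummy where clause
  else
    strcpy(query, srcdef);

  if (!strnicmp(srcdef, "select ", 7))
    strcat(query, " LIMIT 0");         // metadata only

  return query;
} // end of PrepareSrcQuery
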
diff --git a/storage/connect/mysql-test/connect/r/xml_zip.result b/storage/connect/mysql-test/connect/r/xml_zip.result
new file mode 100644
index 00000000000..f176149c53f
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/xml_zip.result
@@ -0,0 +1,98 @@
+Warnings:
+Warning 1105 No file name. Table will use t1.xml
+#
+# Testing zipped XML tables
+#
+CREATE TABLE t1 (
+ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
+LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
+SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+TITLE CHAR(30) NOT NULL,
+PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+DATEPUB CHAR(4) NOT NULL
+) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
+OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
+SELECT * FROM t1;
+ISBN 9782212090819
+LANG fr
+SUBJECT applications
+AUTHOR_FIRSTNAME Jean-Christophe
+AUTHOR_LASTNAME Bernadac
+TRANSLATOR_PREFIX NULL
+TRANSLATOR_FIRSTNAME NULL
+TRANSLATOR_LASTNAME NULL
+TITLE Construire une application XML
+PUBLISHER_NAME Eyrolles
+PUBLISHER_PLACE Paris
+DATEPUB 1999
+ISBN 9782212090819
+LANG fr
+SUBJECT applications
+AUTHOR_FIRSTNAME François
+AUTHOR_LASTNAME Knab
+TRANSLATOR_PREFIX NULL
+TRANSLATOR_FIRSTNAME NULL
+TRANSLATOR_LASTNAME NULL
+TITLE Construire une application XML
+PUBLISHER_NAME Eyrolles
+PUBLISHER_PLACE Paris
+DATEPUB 1999
+ISBN 9782840825685
+LANG fr
+SUBJECT applications
+AUTHOR_FIRSTNAME William J.
+AUTHOR_LASTNAME Pardi
+TRANSLATOR_PREFIX adapté de l'anglais par
+TRANSLATOR_FIRSTNAME James
+TRANSLATOR_LASTNAME Guerin
+TITLE XML en Action
+PUBLISHER_NAME Microsoft Press
+PUBLISHER_PLACE Paris
+DATEPUB 1999
+ISBN 9782212090529
+LANG fr
+SUBJECT général
+AUTHOR_FIRSTNAME Alain
+AUTHOR_LASTNAME Michard
+TRANSLATOR_PREFIX NULL
+TRANSLATOR_FIRSTNAME NULL
+TRANSLATOR_LASTNAME NULL
+TITLE XML, Langage et Applications
+PUBLISHER_NAME Eyrolles
+PUBLISHER_PLACE Paris
+DATEPUB 2003
+CREATE TABLE t2
+ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
+OPTION_LIST='xmlsup=libxml2';
+SELECT * FROM t2;
+ISBN 9782212090819
+LANG fr
+SUBJECT applications
+AUTHOR Jean-Christophe Bernadac
+TRANSLATOR NULL
+TITLE Construire une application XML
+PUBLISHER Eyrolles Paris
+DATEPUB 1999
+ISBN 9782840825685
+LANG fr
+SUBJECT applications
+AUTHOR William J. Pardi
+TRANSLATOR James Guerin
+TITLE XML en Action
+PUBLISHER Microsoft Press Paris
+DATEPUB 1999
+ISBN 9782212090529
+LANG fr
+SUBJECT général
+AUTHOR Alain Michard
+TRANSLATOR NULL
+TITLE XML, Langage et Applications
+PUBLISHER Eyrolles Paris
+DATEPUB 2003
+DROP TABLE t1,t2;
diff --git a/storage/connect/mysql-test/connect/r/zip.result b/storage/connect/mysql-test/connect/r/zip.result
new file mode 100644
index 00000000000..c03b27bd428
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/zip.result
@@ -0,0 +1,240 @@
+#
+# Testing zipped DOS tables
+#
+CREATE TABLE t1 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='ENTRY=new1.dos' ZIPPED=1;
+INSERT INTO t1 VALUES(1,'One'),(2,'Two'),(3,'Three'),(4,'Four'),(5,'Five'),(6,'Six'),(7,'Seven'),(8,'Eight'),(9,'Nine'),(10,'Ten');
+SELECT * FROM t1;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+CREATE TABLE t2 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='ENTRY=new2.dos,APPEND=1' ZIPPED=1;
+INSERT INTO t2 VALUES(11,'Eleven'),(12,'Twelve'),(13,'Thirteen'),(14,'Fourteen'),(15,'Fiften'),(16,'Sixteen'),(17,'Seventeen'),(18,'Eighteen'),(19,'Nineteen'),(20,'Twenty');
+SELECT * FROM t2;
+digit letter
+11 Eleven
+12 Twelve
+13 Thirteen
+14 Fourteen
+15 Fiften
+16 Sixteen
+17 Seventeen
+18 Eighteen
+19 Nineteen
+20 Twenty
+CREATE TABLE t3 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='MULENTRIES=1' ZIPPED=1;
+SELECT * FROM t3;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+11 Eleven
+12 Twelve
+13 Thirteen
+14 Fourteen
+15 Fiften
+16 Sixteen
+17 Seventeen
+18 Eighteen
+19 Nineteen
+20 Twenty
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newdos.zip';
+SELECT * FROM t4;
+fn cmpsize uncsize method
+new1.dos 67 79 8
+new2.dos 77 112 8
+DROP TABLE t1,t2,t3,t4;
+#
+# Testing zipped CSV tables
+#
+CREATE TABLE t1 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new1.csv' HEADER=1 ZIPPED=1;
+INSERT INTO t1 VALUES(1,'One'),(2,'Two'),(3,'Three'),(4,'Four'),(5,'Five'),(6,'Six'),(7,'Seven'),(8,'Eight'),(9,'Nine'),(10,'Ten');
+SELECT * FROM t1;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+CREATE TABLE td1
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new1.csv' HEADER=1 ZIPPED=1;
+SELECT * FROM td1;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+DROP TABLE td1;
+CREATE TABLE t2 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new2.csv,APPEND=1' HEADER=1 ZIPPED=1;
+INSERT INTO t2 VALUES(11,'Eleven'),(12,'Twelve'),(13,'Thirteen'),(14,'Fourteen'),(15,'Fiften'),(16,'Sixteen'),(17,'Seventeen'),(18,'Eighteen'),(19,'Nineteen'),(20,'Twenty');
+SELECT * FROM t2;
+digit letter
+11 Eleven
+12 Twelve
+13 Thirteen
+14 Fourteen
+15 Fiften
+16 Sixteen
+17 Seventeen
+18 Eighteen
+19 Nineteen
+20 Twenty
+CREATE TABLE t3
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='MULENTRIES=1' HEADER=1 ZIPPED=1;
+SELECT * FROM t3;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+11 Eleven
+12 Twelve
+13 Thirteen
+14 Fourteen
+15 Fiften
+16 Sixteen
+17 Seventeen
+18 Eighteen
+19 Nineteen
+20 Twenty
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newcsv.zip';
+SELECT * FROM t4;
+fn cmpsize uncsize method
+new1.csv 79 83 8
+new2.csv 94 125 8
+DROP TABLE t1,t2,t3,t4;
+#
+# Testing zipped JSON tables
+#
+CREATE TABLE t1 (
+_id INT(2) NOT NULL,
+name_first CHAR(9) NOT NULL FIELD_FORMAT='name:first',
+name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='name:aka',
+name_last CHAR(10) NOT NULL FIELD_FORMAT='name:last',
+title CHAR(12) DEFAULT NULL,
+birth CHAR(20) DEFAULT NULL,
+death CHAR(20) DEFAULT NULL,
+contribs CHAR(7) NOT NULL FIELD_FORMAT='contribs:',
+awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='awards::award',
+awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='awards::year',
+awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='awards::by'
+) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES;
+SELECT * FROM t1;
+_id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by
+1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society
+2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp Turing Award 1971 ACM
+3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC Computer Sciences Man of the Year 1969 Data Processing Management Association
+4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+6 Guido NULL van Rossum NULL 1956-01-31T05:00:00Z NULL Python Award for the Advancement of Free Software 2001 Free Software Foundation
+7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX Turing Award 1983 ACM
+8 Yukihiro Matz Matsumoto NULL 1965-04-14T04:00:00Z NULL Ruby Award for the Advancement of Free Software 2011 Free Software Foundation
+9 James NULL Gosling NULL 1955-05-19T04:00:00Z NULL Java The Economist Innovation Award 2002 The Economist
+10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL
+CREATE TABLE t2
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1
+OPTION_LIST='LEVEL=5';
+SELECT * FROM t2;
+_id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by
+1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society
+2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp Turing Award 1971 ACM
+3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC Computer Sciences Man of the Year 1969 Data Processing Management Association
+4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+6 Guido NULL van Rossum NULL 1956-01-31T05:00:00Z NULL Python Award for the Advancement of Free Software 2001 Free Software Foundation
+7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX Turing Award 1983 ACM
+8 Yukihiro Matz Matsumoto NULL 1965-04-14T04:00:00Z NULL Ruby Award for the Advancement of Free Software 2011 Free Software Foundation
+9 James NULL Gosling NULL 1955-05-19T04:00:00Z NULL Java The Economist Innovation Award 2002 The Economist
+10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL
+CREATE TABLE t3 (
+_id INT(2) NOT NULL,
+firstname CHAR(9) NOT NULL FIELD_FORMAT='name:first',
+aka CHAR(4) DEFAULT NULL FIELD_FORMAT='name:aka',
+lastname CHAR(10) NOT NULL FIELD_FORMAT='name:last',
+title CHAR(12) DEFAULT NULL,
+birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
+death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
+contribs CHAR(64) NOT NULL FIELD_FORMAT='contribs:[", "]',
+award CHAR(42) DEFAULT NULL FIELD_FORMAT='awards:[x]:award',
+year CHAR(4) DEFAULT NULL FIELD_FORMAT='awards:[x]:year',
+`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='awards:[x]:by'
+) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES;
+SELECT * FROM t3 WHERE _id = 1;
+_id firstname aka lastname title birth death contribs award year by
+1 John NULL Backus NULL 1924-03-12 2008-05-03 Fortran, ALGOL, Backus-Naur Form, FP W.W. McDowell Award 1967 IEEE Computer Society
+1 John NULL Backus NULL 1924-03-12 2008-05-03 Fortran, ALGOL, Backus-Naur Form, FP National Medal of Science 1975 National Science Foundation
+1 John NULL Backus NULL 1924-03-12 2008-05-03 Fortran, ALGOL, Backus-Naur Form, FP Turing Award 1977 ACM
+1 John NULL Backus NULL 1924-03-12 2008-05-03 Fortran, ALGOL, Backus-Naur Form, FP Draper Prize 1993 National Academy of Engineering
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='bios.zip';
+SELECT * FROM t4;
+fn cmpsize uncsize method
+bios.json 1096 6848 8
+DROP TABLE t1,t2,t3,t4;
diff --git a/storage/connect/mysql-test/connect/std_data/bios.json b/storage/connect/mysql-test/connect/std_data/bios.json
new file mode 100644
index 00000000000..85e4ecb933f
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/bios.json
@@ -0,0 +1,273 @@
+[
+ {
+ "_id" : 1,
+ "name" : {
+ "first" : "John",
+ "last" : "Backus"
+ },
+ "birth" : "1924-12-03T05:00:00Z",
+ "death" : "2007-03-17T04:00:00Z",
+ "contribs" : [
+ "Fortran",
+ "ALGOL",
+ "Backus-Naur Form",
+ "FP"
+ ],
+ "awards" : [
+ {
+ "award" : "W.W. McDowell Award",
+ "year" : 1967,
+ "by" : "IEEE Computer Society"
+ },
+ {
+ "award" : "National Medal of Science",
+ "year" : 1975,
+ "by" : "National Science Foundation"
+ },
+ {
+ "award" : "Turing Award",
+ "year" : 1977,
+ "by" : "ACM"
+ },
+ {
+ "award" : "Draper Prize",
+ "year" : 1993,
+ "by" : "National Academy of Engineering"
+ }
+ ]
+ },
+ {
+ "_id" : 2,
+ "name" : {
+ "first" : "John",
+ "last" : "McCarthy"
+ },
+ "birth" : "1927-09-04T04:00:00Z",
+ "death" : "2011-12-24T05:00:00Z",
+ "contribs" : [
+ "Lisp",
+ "Artificial Intelligence",
+ "ALGOL"
+ ],
+ "awards" : [
+ {
+ "award" : "Turing Award",
+ "year" : 1971,
+ "by" : "ACM"
+ },
+ {
+ "award" : "Kyoto Prize",
+ "year" : 1988,
+ "by" : "Inamori Foundation"
+ },
+ {
+ "award" : "National Medal of Science",
+ "year" : 1990,
+ "by" : "National Science Foundation"
+ }
+ ]
+ },
+ {
+ "_id" : 3,
+ "name" : {
+ "first" : "Grace",
+ "last" : "Hopper"
+ },
+ "title" : "Rear Admiral",
+ "birth" : "1906-12-09T05:00:00Z",
+ "death" : "1992-01-01T05:00:00Z",
+ "contribs" : [
+ "UNIVAC",
+ "compiler",
+ "FLOW-MATIC",
+ "COBOL"
+ ],
+ "awards" : [
+ {
+ "award" : "Computer Sciences Man of the Year",
+ "year" : 1969,
+ "by" : "Data Processing Management Association"
+ },
+ {
+ "award" : "Distinguished Fellow",
+ "year" : 1973,
+ "by" : " British Computer Society"
+ },
+ {
+ "award" : "W. W. McDowell Award",
+ "year" : 1976,
+ "by" : "IEEE Computer Society"
+ },
+ {
+ "award" : "National Medal of Technology",
+ "year" : 1991,
+ "by" : "United States"
+ }
+ ]
+ },
+ {
+ "_id" : 4,
+ "name" : {
+ "first" : "Kristen",
+ "last" : "Nygaard"
+ },
+ "birth" : "1926-08-27T04:00:00Z",
+ "death" : "2002-08-10T04:00:00Z",
+ "contribs" : [
+ "OOP",
+ "Simula"
+ ],
+ "awards" : [
+ {
+ "award" : "Rosing Prize",
+ "year" : 1999,
+ "by" : "Norwegian Data Association"
+ },
+ {
+ "award" : "Turing Award",
+ "year" : 2001,
+ "by" : "ACM"
+ },
+ {
+ "award" : "IEEE John von Neumann Medal",
+ "year" : 2001,
+ "by" : "IEEE"
+ }
+ ]
+ },
+ {
+ "_id" : 5,
+ "name" : {
+ "first" : "Ole-Johan",
+ "last" : "Dahl"
+ },
+ "birth" : "1931-10-12T04:00:00Z",
+ "death" : "2002-06-29T04:00:00Z",
+ "contribs" : [
+ "OOP",
+ "Simula"
+ ],
+ "awards" : [
+ {
+ "award" : "Rosing Prize",
+ "year" : 1999,
+ "by" : "Norwegian Data Association"
+ },
+ {
+ "award" : "Turing Award",
+ "year" : 2001,
+ "by" : "ACM"
+ },
+ {
+ "award" : "IEEE John von Neumann Medal",
+ "year" : 2001,
+ "by" : "IEEE"
+ }
+ ]
+ },
+ {
+ "_id" : 6,
+ "name" : {
+ "first" : "Guido",
+ "last" : "van Rossum"
+ },
+ "birth" : "1956-01-31T05:00:00Z",
+ "contribs" : [
+ "Python"
+ ],
+ "awards" : [
+ {
+ "award" : "Award for the Advancement of Free Software",
+ "year" : 2001,
+ "by" : "Free Software Foundation"
+ },
+ {
+ "award" : "NLUUG Award",
+ "year" : 2003,
+ "by" : "NLUUG"
+ }
+ ]
+ },
+ {
+ "_id" : 7,
+ "name" : {
+ "first" : "Dennis",
+ "last" : "Ritchie"
+ },
+ "birth" : "1941-09-09T04:00:00Z",
+ "death" : "2011-10-12T04:00:00Z",
+ "contribs" : [
+ "UNIX",
+ "C"
+ ],
+ "awards" : [
+ {
+ "award" : "Turing Award",
+ "year" : 1983,
+ "by" : "ACM"
+ },
+ {
+ "award" : "National Medal of Technology",
+ "year" : 1998,
+ "by" : "United States"
+ },
+ {
+ "award" : "Japan Prize",
+ "year" : 2011,
+ "by" : "The Japan Prize Foundation"
+ }
+ ]
+ },
+ {
+ "_id" : 8,
+ "name" : {
+ "first" : "Yukihiro",
+ "aka" : "Matz",
+ "last" : "Matsumoto"
+ },
+ "birth" : "1965-04-14T04:00:00Z",
+ "contribs" : [
+ "Ruby"
+ ],
+ "awards" : [
+ {
+ "award" : "Award for the Advancement of Free Software",
+ "year" : "2011",
+ "by" : "Free Software Foundation"
+ }
+ ]
+ },
+ {
+ "_id" : 9,
+ "name" : {
+ "first" : "James",
+ "last" : "Gosling"
+ },
+ "birth" : "1955-05-19T04:00:00Z",
+ "contribs" : [
+ "Java"
+ ],
+ "awards" : [
+ {
+ "award" : "The Economist Innovation Award",
+ "year" : 2002,
+ "by" : "The Economist"
+ },
+ {
+ "award" : "Officer of the Order of Canada",
+ "year" : 2007,
+ "by" : "Canada"
+ }
+ ]
+ },
+ {
+ "_id" : 10,
+ "name" : {
+ "first" : "Martin",
+ "last" : "Odersky"
+ },
+ "contribs" : [
+ "Scala"
+ ]
+ }
+]
diff --git a/storage/connect/mysql-test/connect/std_data/xsample2.xml b/storage/connect/mysql-test/connect/std_data/xsample2.xml
new file mode 100644
index 00000000000..35295844370
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/xsample2.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<BIBLIO SUBJECT="XML">
+ <BOOK ISBN="9782212090819" LANG="fr" SUBJECT="applications">
+ <AUTHOR>
+ <FIRSTNAME>Jean-Christophe</FIRSTNAME>
+ <LASTNAME>Bernadac</LASTNAME>
+ </AUTHOR>
+ <AUTHOR>
+ <FIRSTNAME>François</FIRSTNAME>
+ <LASTNAME>Knab</LASTNAME>
+ </AUTHOR>
+ <TITLE>Construire une application XML</TITLE>
+ <PUBLISHER>
+ <NAME>Eyrolles</NAME>
+ <PLACE>Paris</PLACE>
+ </PUBLISHER>
+ <DATEPUB>1999</DATEPUB>
+ </BOOK>
+ <BOOK ISBN="9782840825685" LANG="fr" SUBJECT="applications">
+ <AUTHOR>
+ <FIRSTNAME>William J.</FIRSTNAME>
+ <LASTNAME>Pardi</LASTNAME>
+ </AUTHOR>
+ <TRANSLATOR PREFIX="adapté de l'anglais par">
+ <FIRSTNAME>James</FIRSTNAME>
+ <LASTNAME>Guerin</LASTNAME>
+ </TRANSLATOR>
+ <TITLE>XML en Action</TITLE>
+ <PUBLISHER>
+ <NAME>Microsoft Press</NAME>
+ <PLACE>Paris</PLACE>
+ </PUBLISHER>
+ <DATEPUB>1999</DATEPUB>
+ </BOOK>
+ <BOOK ISBN="9782212090529" LANG="fr" SUBJECT="général">
+ <AUTHOR>
+ <FIRSTNAME>Alain</FIRSTNAME>
+ <LASTNAME>Michard</LASTNAME>
+ </AUTHOR>
+ <TITLE>XML, Langage et Applications</TITLE>
+ <PUBLISHER>
+ <NAME>Eyrolles</NAME>
+ <PLACE>Paris</PLACE>
+ </PUBLISHER>
+ <DATEPUB>2003</DATEPUB>
+ </BOOK>
+</BIBLIO>
diff --git a/storage/connect/mysql-test/connect/t/have_zip.inc b/storage/connect/mysql-test/connect/t/have_zip.inc
new file mode 100644
index 00000000000..d1283fc1d38
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/have_zip.inc
@@ -0,0 +1,19 @@
+--disable_query_log
+--error 0,ER_UNKNOWN_ERROR
+CREATE TABLE t1 (a CHAR(10)) ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='test.zip';
+if ($mysql_errno)
+{
+ Skip No ZIP support;
+}
+#if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES
+# WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
+# AND ENGINE='CONNECT'
+# AND CREATE_OPTIONS LIKE '%`table_type`=ZIP%'
+# AND CREATE_OPTIONS LIKE "%`file_name`='test.zip'%"`)
+#{
+# DROP TABLE IF EXISTS t1;
+# Skip Need ZIP support;
+#}
+DROP TABLE t1;
+--enable_query_log
+
diff --git a/storage/connect/mysql-test/connect/t/xml_zip.test b/storage/connect/mysql-test/connect/t/xml_zip.test
new file mode 100644
index 00000000000..d8c7894f861
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/xml_zip.test
@@ -0,0 +1,41 @@
+--source have_zip.inc
+--source have_libxml2.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--vertical_results
+
+--copy_file $MTR_SUITE_DIR/std_data/xsample2.xml $MYSQLD_DATADIR/test/xsample2.xml
+
+--echo #
+--echo # Testing zipped XML tables
+--echo #
+CREATE TABLE t1 (
+ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
+LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
+SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+TITLE CHAR(30) NOT NULL,
+PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+DATEPUB CHAR(4) NOT NULL
+) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
+OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
+SELECT * FROM t1;
+
+#testing discovery
+CREATE TABLE t2
+ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
+OPTION_LIST='xmlsup=libxml2';
+SELECT * FROM t2;
+DROP TABLE t1,t2;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/xsample2.xml
+--remove_file $MYSQLD_DATADIR/test/xsample2.zip
diff --git a/storage/connect/mysql-test/connect/t/zip.test b/storage/connect/mysql-test/connect/t/zip.test
new file mode 100644
index 00000000000..a4892e9ed4e
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/zip.test
@@ -0,0 +1,136 @@
+--source have_zip.inc
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--copy_file $MTR_SUITE_DIR/std_data/bios.json $MYSQLD_DATADIR/test/bios.json
+
+--echo #
+--echo # Testing zipped DOS tables
+--echo #
+CREATE TABLE t1 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='ENTRY=new1.dos' ZIPPED=1;
+INSERT INTO t1 VALUES(1,'One'),(2,'Two'),(3,'Three'),(4,'Four'),(5,'Five'),(6,'Six'),(7,'Seven'),(8,'Eight'),(9,'Nine'),(10,'Ten');
+SELECT * FROM t1;
+
+CREATE TABLE t2 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='ENTRY=new2.dos,APPEND=1' ZIPPED=1;
+INSERT INTO t2 VALUES(11,'Eleven'),(12,'Twelve'),(13,'Thirteen'),(14,'Fourteen'),(15,'Fiften'),(16,'Sixteen'),(17,'Seventeen'),(18,'Eighteen'),(19,'Nineteen'),(20,'Twenty');
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='MULENTRIES=1' ZIPPED=1;
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newdos.zip';
+SELECT * FROM t4;
+DROP TABLE t1,t2,t3,t4;
+
+--echo #
+--echo # Testing zipped CSV tables
+--echo #
+CREATE TABLE t1 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new1.csv' HEADER=1 ZIPPED=1;
+INSERT INTO t1 VALUES(1,'One'),(2,'Two'),(3,'Three'),(4,'Four'),(5,'Five'),(6,'Six'),(7,'Seven'),(8,'Eight'),(9,'Nine'),(10,'Ten');
+SELECT * FROM t1;
+
+# Test discovery
+CREATE TABLE td1
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new1.csv' HEADER=1 ZIPPED=1;
+SELECT * FROM td1;
+DROP TABLE td1;
+
+CREATE TABLE t2 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new2.csv,APPEND=1' HEADER=1 ZIPPED=1;
+INSERT INTO t2 VALUES(11,'Eleven'),(12,'Twelve'),(13,'Thirteen'),(14,'Fourteen'),(15,'Fiften'),(16,'Sixteen'),(17,'Seventeen'),(18,'Eighteen'),(19,'Nineteen'),(20,'Twenty');
+SELECT * FROM t2;
+
+CREATE TABLE t3
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='MULENTRIES=1' HEADER=1 ZIPPED=1;
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newcsv.zip';
+SELECT * FROM t4;
+DROP TABLE t1,t2,t3,t4;
+
+--echo #
+--echo # Testing zipped JSON tables
+--echo #
+CREATE TABLE t1 (
+_id INT(2) NOT NULL,
+name_first CHAR(9) NOT NULL FIELD_FORMAT='name:first',
+name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='name:aka',
+name_last CHAR(10) NOT NULL FIELD_FORMAT='name:last',
+title CHAR(12) DEFAULT NULL,
+birth CHAR(20) DEFAULT NULL,
+death CHAR(20) DEFAULT NULL,
+contribs CHAR(7) NOT NULL FIELD_FORMAT='contribs:',
+awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='awards::award',
+awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='awards::year',
+awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='awards::by'
+) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES;
+SELECT * FROM t1;
+
+# Test discovery
+CREATE TABLE t2
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1
+OPTION_LIST='LEVEL=5';
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+_id INT(2) NOT NULL,
+firstname CHAR(9) NOT NULL FIELD_FORMAT='name:first',
+aka CHAR(4) DEFAULT NULL FIELD_FORMAT='name:aka',
+lastname CHAR(10) NOT NULL FIELD_FORMAT='name:last',
+title CHAR(12) DEFAULT NULL,
+birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
+death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
+contribs CHAR(64) NOT NULL FIELD_FORMAT='contribs:[", "]',
+award CHAR(42) DEFAULT NULL FIELD_FORMAT='awards:[x]:award',
+year CHAR(4) DEFAULT NULL FIELD_FORMAT='awards:[x]:year',
+`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='awards:[x]:by'
+) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES;
+SELECT * FROM t3 WHERE _id = 1;
+
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='bios.zip';
+SELECT * FROM t4;
+DROP TABLE t1,t2,t3,t4;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/newdos.zip
+--remove_file $MYSQLD_DATADIR/test/newcsv.zip
+--remove_file $MYSQLD_DATADIR/test/bios.zip
+--remove_file $MYSQLD_DATADIR/test/bios.json
+
diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp
index 7320f4cc1d9..433e392eace 100644
--- a/storage/connect/odbconn.cpp
+++ b/storage/connect/odbconn.cpp
@@ -35,8 +35,8 @@
#include "global.h"
#include "plgdbsem.h"
#include "xobject.h"
-//#include "kindex.h"
#include "xtable.h"
+#include "tabext.h"
#include "odbccat.h"
#include "tabodbc.h"
#include "plgcnx.h" // For DB types
@@ -413,12 +413,20 @@ PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table,
/**************************************************************************/
PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src, POPARM sop)
{
+ char *sqry;
ODBConn *ocp = new(g) ODBConn(g, NULL);
if (ocp->Open(dsn, sop, 10) < 1) // openReadOnly + noOdbcDialog
return NULL;
- return ocp->GetMetaData(g, dsn, src);
+ if (strstr(src, "%s")) {
+ // Place holder for an eventual where clause
+ sqry = (char*)PlugSubAlloc(g, NULL, strlen(src) + 3);
+ sprintf(sqry, src, "1=1", "1=1"); // dummy where clause
+ } else
+ sqry = src;
+
+ return ocp->GetMetaData(g, dsn, sqry);
} // end of ODBCSrcCols
#if 0
@@ -1417,7 +1425,7 @@ int ODBConn::ExecDirectSQL(char *sql, ODBCCOL *tocols)
b = true;
if (trace)
- htrc("ExecDirect hstmt=%p %.64s\n", hstmt, sql);
+ htrc("ExecDirect hstmt=%p %.256s\n", hstmt, sql);
if (m_Tdb->Srcdef) {
// Be sure this is a query returning a result set
diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h
index cb408494319..800b1098d50 100644
--- a/storage/connect/plgdbsem.h
+++ b/storage/connect/plgdbsem.h
@@ -1,7 +1,7 @@
/************** PlgDBSem H Declares Source Code File (.H) **************/
/* Name: PLGDBSEM.H Version 3.7 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* This file contains the CONNECT storage engine definitions. */
/***********************************************************************/
@@ -57,7 +57,7 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */
TAB_FIX = 2, /* Fixed column offset, fixed LRECL */
TAB_BIN = 3, /* Like FIX but can have binary fields */
TAB_CSV = 4, /* DOS files with CSV records */
- TAB_FMT = 5, /* DOS files with formatted recordss */
+ TAB_FMT = 5, /* DOS files with formatted records */
TAB_DBF = 6, /* DBF Dbase or Foxpro files */
TAB_XML = 7, /* XML or HTML files */
TAB_INI = 8, /* INI or CFG files */
@@ -212,11 +212,24 @@ enum OPVAL {OP_EQ = 1, /* Filtering operator = */
OP_SUB = 17, /* Expression Substract operator */
OP_MULT = 18, /* Expression Multiply operator */
OP_DIV = 19, /* Expression Divide operator */
- OP_NOP = 21, /* Scalar function is nopped */
OP_NUM = 22, /* Scalar function Op Num */
- OP_ABS = 23, /* Scalar function Op Abs */
OP_MAX = 24, /* Scalar function Op Max */
OP_MIN = 25, /* Scalar function Op Min */
+ OP_EXP = 36, /* Scalar function Op Exp */
+ OP_FDISK = 94, /* Operator Disk of fileid */
+ OP_FPATH = 95, /* Operator Path of fileid */
+ OP_FNAME = 96, /* Operator Name of fileid */
+ OP_FTYPE = 97, /* Operator Type of fileid */
+ OP_LAST = 82, /* Index operator Find Last */
+ OP_FIRST = 106, /* Index operator Find First */
+ OP_NEXT = 107, /* Index operator Find Next */
+ OP_SAME = 108, /* Index operator Find Next Same */
+ OP_FSTDIF = 109, /* Index operator Find First dif */
+ OP_NXTDIF = 110, /* Index operator Find Next dif */
+ OP_PREV = 116}; /* Index operator Find Previous */
+#if 0
+ OP_NOP = 21, /* Scalar function is nopped */
+ OP_ABS = 23, /* Scalar function Op Abs */
OP_CEIL = 26, /* Scalar function Op Ceil */
OP_FLOOR = 27, /* Scalar function Op Floor */
OP_MOD = 28, /* Scalar function Op Mod */
@@ -312,6 +325,7 @@ enum OPVAL {OP_EQ = 1, /* Filtering operator = */
OP_REMOVE = 201, /* Scalar function Op Remove */
OP_RENAME = 202, /* Scalar function Op Rename */
OP_FCOMP = 203}; /* Scalar function Op Compare */
+#endif // 0
enum TUSE {USE_NO = 0, /* Table is not yet linearized */
USE_LIN = 1, /* Table is linearized */
@@ -356,6 +370,7 @@ typedef class XOBJECT *PXOB;
typedef class COLBLK *PCOL;
typedef class TDB *PTDB;
typedef class TDBASE *PTDBASE;
+typedef class TDBEXT *PTDBEXT;
typedef class TDBDOS *PTDBDOS;
typedef class TDBFIX *PTDBFIX;
typedef class TDBFMT *PTDBFMT;
@@ -374,6 +389,7 @@ typedef class KXYCOL *PXCOL;
typedef class CATALOG *PCATLG;
typedef class RELDEF *PRELDEF;
typedef class TABDEF *PTABDEF;
+typedef class EXTDEF *PEXTBDEF;
typedef class DOSDEF *PDOSDEF;
typedef class CSVDEF *PCSVDEF;
typedef class VCTDEF *PVCTDEF;
@@ -619,4 +635,4 @@ int global_open(GLOBAL *g, int msgid, const char *filename, int flags, int mode)
DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR name, LPCSTR dir);
char *MakeEscape(PGLOBAL g, char* str, char q);
-DllExport bool PushWarning(PGLOBAL, PTDBASE, int level = 1);
+DllExport bool PushWarning(PGLOBAL, PTDB, int level = 1);
diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp
index 83975c6d8fa..1910cdcdec8 100644
--- a/storage/connect/plgdbutl.cpp
+++ b/storage/connect/plgdbutl.cpp
@@ -1,11 +1,11 @@
/********** PlgDBUtl Fpe C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: PLGDBUTL */
/* ------------- */
-/* Version 3.9 */
+/* Version 4.0 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -939,7 +939,11 @@ int PlugCloseFile(PGLOBAL g __attribute__((unused)), PFBLOCK fp, bool all)
#endif // LIBXML2_SUPPORT
#ifdef ZIP_SUPPORT
case TYPE_FB_ZIP:
- ((ZIPUTIL*)fp->File)->close();
+ if (fp->Mode == MODE_INSERT)
+ ((ZIPUTIL*)fp->File)->close();
+ else
+ ((UNZIPUTL*)fp->File)->close();
+
fp->Memory = NULL;
fp->Mode = MODE_ANY;
fp->Count = 0;
@@ -1119,7 +1123,7 @@ char *GetAmName(PGLOBAL g, AMT am, void *memp)
return amn;
} // end of GetAmName
-#if defined(__WIN__) && !defined(NOCATCH)
+#if defined(SE_CATCH)
/***********************************************************************/
/* GetExceptionDesc: return the description of an exception code. */
/***********************************************************************/
@@ -1207,7 +1211,7 @@ char *GetExceptionDesc(PGLOBAL g, unsigned int e)
return p;
} // end of GetExceptionDesc
-#endif // __WIN__ && !NOCATCH
+#endif // SE_CATCH
/***********************************************************************/
/* PlgDBalloc: allocates or suballocates memory conditionally. */
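
With zip support now split between a writer class (ZIPUTIL) and the new reader class (UNZIPUTL), the TYPE_FB_ZIP case above closes whichever object the file block holds, based on its open mode. The same dispatch as a standalone sketch, assuming only the FBLOCK fields used in the hunk (CloseZipBlock is a hypothetical helper):

static void CloseZipBlock(PFBLOCK fp)
{
  if (fp->Mode == MODE_INSERT)
    ((ZIPUTIL*)fp->File)->close();    // table was being written
  else
    ((UNZIPUTL*)fp->File)->close();   // table was being read

  fp->Memory = NULL;
  fp->Mode = MODE_ANY;
  fp->Count = 0;
} // end of CloseZipBlock
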
diff --git a/storage/connect/plgxml.cpp b/storage/connect/plgxml.cpp
index 71b72621b06..eb31e24235b 100644
--- a/storage/connect/plgxml.cpp
+++ b/storage/connect/plgxml.cpp
@@ -1,6 +1,6 @@
/******************************************************************/
/* Implementation of XML document processing using PdbXML. */
-/* Author: Olivier Bertrand 2007-2012 */
+/* Author: Olivier Bertrand 2007-2017 */
/******************************************************************/
#include "my_global.h"
#include "global.h"
@@ -49,7 +49,7 @@ bool XMLDOCUMENT::InitZip(PGLOBAL g, char *entry)
{
#if defined(ZIP_SUPPORT)
bool mul = (entry) ? strchr(entry, '*') || strchr(entry, '?') : false;
- zip = new(g) ZIPUTIL(entry, mul);
+ zip = new(g) UNZIPUTL(entry, mul);
return zip == NULL;
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
diff --git a/storage/connect/plgxml.h b/storage/connect/plgxml.h
index db7dfa6bda5..6870764c503 100644
--- a/storage/connect/plgxml.h
+++ b/storage/connect/plgxml.h
@@ -101,7 +101,7 @@ class XMLDOCUMENT : public BLOCK {
// Members
#if defined(ZIP_SUPPORT)
- ZIPUTIL *zip; /* Used for zipped file */
+ UNZIPUTL *zip; /* Used for zipped file */
#else // !ZIP_SUPPORT
bool zip; /* Always false */
#endif // !ZIP_SUPPORT
diff --git a/storage/connect/plugutil.c b/storage/connect/plugutil.c
index 2551b603349..bfac8a5fd99 100644
--- a/storage/connect/plugutil.c
+++ b/storage/connect/plugutil.c
@@ -244,6 +244,9 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath)
char *drive = NULL, *defdrv = NULL;
#endif
+ if (trace > 1)
+ htrc("prefix=%s fn=%s path=%s\n", prefix, FileName, defpath);
+
if (!strncmp(FileName, "//", 2) || !strncmp(FileName, "\\\\", 2)) {
strcpy(pBuff, FileName); // Remote file
return pBuff;
diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp
index 30e4d49d249..5bb7848ab1c 100644
--- a/storage/connect/reldef.cpp
+++ b/storage/connect/reldef.cpp
@@ -621,8 +621,8 @@ bool OEMDEF::DefineAM(PGLOBAL g, LPCSTR, int)
/***********************************************************************/
PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode)
{
- RECFM rfm;
- PTDBASE tdbp = NULL;
+ RECFM rfm;
+ PTDB tdbp = NULL;
// If define block not here yet, get it now
if (!Pxdef && !(Pxdef = GetXdef(g)))
@@ -632,7 +632,7 @@ PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode)
/* Allocate a TDB of the proper type. */
/* Column blocks will be allocated only when needed. */
/*********************************************************************/
- if (!(tdbp = (PTDBASE)Pxdef->GetTable(g, mode)))
+ if (!(tdbp = Pxdef->GetTable(g, mode)))
return NULL;
else
rfm = tdbp->GetFtype();
diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h
index bc1bd2ddd74..52a131dbf3d 100644
--- a/storage/connect/reldef.h
+++ b/storage/connect/reldef.h
@@ -64,15 +64,16 @@ class DllExport RELDEF : public BLOCK { // Relation definition block
}; // end of RELDEF
/***********************************************************************/
-/* These classes correspond to the data base description contained in */
-/* a .XDB file the A.M. DOS, FIX, CSV, MAP, BIN, VCT, PLG, ODBC, DOM. */
+/* This class corresponds to the data base description for tables */
+/* of type DOS, FIX, CSV, DBF, BIN, VCT, JSON, XML... */
/***********************************************************************/
class DllExport TABDEF : public RELDEF { /* Logical table descriptor */
friend class CATALOG;
friend class PLUGCAT;
friend class MYCAT;
- friend class TDBASE;
- public:
+ friend class TDB;
+ friend class TDBEXT;
+public:
// Constructor
TABDEF(void); // Constructor
@@ -112,11 +113,11 @@ class DllExport TABDEF : public RELDEF { /* Logical table descriptor */
int Sort; /* Table already sorted ??? */
int Multiple; /* 0: No 1: DIR 2: Section 3: filelist */
int Degree; /* Number of columns in the table */
- int Pseudo; /* Bit: 1 ROWID Ok, 2 FILEID Ok */
+ int Pseudo; /* Bit: 1 ROWID Ok, 2 FILEID Ok */
bool Read_Only; /* true for read only tables */
const CHARSET_INFO *m_data_charset;
const char *csname; /* Table charset name */
- }; // end of TABDEF
+}; // end of TABDEF
/***********************************************************************/
/* Externally defined OEM tables. */
@@ -190,11 +191,12 @@ class DllExport COLCRT : public BLOCK { /* Column description block
/***********************************************************************/
/* Column definition block. */
/***********************************************************************/
-class DllExport COLDEF : public COLCRT { /* Column description block */
+class DllExport COLDEF : public COLCRT { /* Column description block */
friend class TABDEF;
friend class COLBLK;
friend class DBFFAM;
- friend class TDBASE;
+ friend class TDB;
+ friend class TDBASE;
friend class TDBDOS;
public:
COLDEF(void); // Constructor
diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp
index 16cc6c33b44..d2bb3d7a4af 100644
--- a/storage/connect/tabdos.cpp
+++ b/storage/connect/tabdos.cpp
@@ -102,6 +102,7 @@ DOSDEF::DOSDEF(void)
Mapped = false;
Zipped = false;
Mulentries = false;
+ Append = false;
Padded = false;
Huge = false;
Accept = false;
@@ -132,10 +133,13 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
: (am && (*am == 'B' || *am == 'b')) ? "B"
: (am && !stricmp(am, "DBF")) ? "D" : "V";
- if ((Zipped = GetBoolCatInfo("Zipped", false)))
- Mulentries = ((Entry = GetStringCatInfo(g, "Entry", NULL)))
- ? strchr(Entry, '*') || strchr(Entry, '?')
- : GetBoolCatInfo("Mulentries", false);
+ if ((Zipped = GetBoolCatInfo("Zipped", false))) {
+ Entry = GetStringCatInfo(g, "Entry", NULL);
+ Mulentries = (Entry && *Entry) ? strchr(Entry, '*') || strchr(Entry, '?')
+ : false;
+ Mulentries = GetBoolCatInfo("Mulentries", Mulentries);
+ Append = GetBoolCatInfo("Append", false);
+ }
Desc = Fn = GetStringCatInfo(g, "Filename", NULL);
Ofn = GetStringCatInfo(g, "Optname", Fn);
@@ -347,10 +351,26 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
if (Zipped) {
#if defined(ZIP_SUPPORT)
if (Recfm == RECFM_VAR) {
- txfp = new(g)ZIPFAM(this);
- tdbp = new(g)TDBDOS(this, txfp);
+ if (mode == MODE_READ || mode == MODE_ANY) {
+ txfp = new(g) UNZFAM(this);
+ } else if (mode == MODE_INSERT) {
+ txfp = new(g) ZIPFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's mode
+
+ tdbp = new(g) TDBDOS(this, txfp);
} else {
- txfp = new(g)ZPXFAM(this);
+ if (mode == MODE_READ || mode == MODE_ANY) {
+ txfp = new(g) UZXFAM(this);
+ } else if (mode == MODE_INSERT) {
+ txfp = new(g) ZPXFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's mode
+
tdbp = new(g)TDBFIX(this, txfp);
} // endif Recfm
@@ -376,7 +396,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
txfp = new(g) MPXFAM(this);
else if (Compressed) {
#if defined(GZ_SUPPORT)
- txfp = new(g) ZIXFAM(this);
+ txfp = new(g) GZXFAM(this);
#else // !GZ_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ");
return NULL;
@@ -484,7 +504,7 @@ TDBDOS::TDBDOS(PGLOBAL g, PTDBDOS tdbp) : TDBASE(tdbp)
} // end of TDBDOS copy constructor
// Method
-PTDB TDBDOS::CopyOne(PTABS t)
+PTDB TDBDOS::Clone(PTABS t)
{
PTDB tp;
PDOSCOL cp1, cp2;
@@ -498,7 +518,7 @@ PTDB TDBDOS::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate DOS column description block. */
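
DOSDEF::GetTable above now picks the zip access method from the open mode: the new UNZFAM/UZXFAM classes handle reading, ZIPFAM/ZPXFAM handle inserting, and UPDATE/DELETE are rejected. A condensed sketch of that selection, assuming the PTXF/PDOSDEF typedefs used elsewhere in CONNECT (MakeZipTxfp is a hypothetical helper, not part of the patch):

static PTXF MakeZipTxfp(PGLOBAL g, PDOSDEF defp, MODE mode, bool varlen)
{
  if (mode == MODE_READ || mode == MODE_ANY)
    return varlen ? (PTXF)new(g) UNZFAM(defp) : (PTXF)new(g) UZXFAM(defp);
  else if (mode == MODE_INSERT)
    return varlen ? (PTXF)new(g) ZIPFAM(defp) : (PTXF)new(g) ZPXFAM(defp);

  strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
  return NULL;                        // caller must fail the open
} // end of MakeZipTxfp
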
diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h
index 4c8eb438a26..922d52ee399 100644
--- a/storage/connect/tabdos.h
+++ b/storage/connect/tabdos.h
@@ -28,7 +28,7 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
friend class TDBFIX;
friend class TXTFAM;
friend class DBFBASE;
- friend class ZIPUTIL;
+ friend class UNZIPUTL;
public:
// Constructor
DOSDEF(void);
@@ -43,7 +43,8 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
PSZ GetOfn(void) {return Ofn;}
PSZ GetEntry(void) {return Entry;}
bool GetMul(void) {return Mulentries;}
- void SetBlock(int block) {Block = block;}
+ bool GetAppend(void) {return Append;}
+ void SetBlock(int block) { Block = block; }
int GetBlock(void) {return Block;}
int GetLast(void) {return Last;}
void SetLast(int last) {Last = last;}
@@ -81,6 +82,7 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
bool Mapped; /* 0: disk file, 1: memory mapped file */
bool Zipped; /* true for zipped table file */
bool Mulentries; /* true for multiple entries */
+ bool Append; /* Used when creating zipped table */
bool Padded; /* true for padded table file */
bool Huge; /* true for files larger than 2GB */
bool Accept; /* true if wrong lines are accepted */
@@ -140,7 +142,7 @@ class DllExport TDBDOS : public TDBASE {
{return (PTDB)new(g) TDBDOS(g, this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual void ResetDB(void) {Txfp->Reset();}
virtual bool IsUsingTemp(PGLOBAL g);
virtual bool IsIndexed(void) {return Indxd;}
diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp
new file mode 100644
index 00000000000..e3518126a49
--- /dev/null
+++ b/storage/connect/tabext.cpp
@@ -0,0 +1,640 @@
+/************* Tabext C++ Functions Source Code File (.CPP) ************/
+/* Name: TABEXT.CPP Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* */
+/* This file contains the CONDFIL, EXTDEF and TDBEXT classes functions. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant MariaDB header file. */
+/***********************************************************************/
+#define MYSQL_SERVER 1
+#include "my_global.h"
+#include "sql_class.h"
+#include "sql_servers.h"
+#include "sql_string.h"
+#if !defined(__WIN__)
+#include "osutil.h"
+#endif
+
+/***********************************************************************/
+/* Include required application header files */
+/* global.h is header containing all global Plug declarations. */
+/* plgdbsem.h is header containing the DB applic. declarations. */
+/* xobject.h is header containing XOBJECT derived classes declares. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "xtable.h"
+#include "tabext.h"
+#include "ha_connect.h"
+
+/* -------------------------- Class CONDFIL -------------------------- */
+
+/***********************************************************************/
+/* CONDFIL Constructor. */
+/***********************************************************************/
+CONDFIL::CONDFIL(const Item *cond, uint idx, AMT type)
+{
+ Cond = cond;
+ Idx = idx;
+ Type = type;
+ Op = OP_XX;
+ Cmds = NULL;
+ Alist = NULL;
+ All = true;
+ Bd = false;
+ Hv = false;
+ Body = NULL,
+ Having = NULL;
+} // end of CONDFIL constructor
+
+/***********************************************************************/
+/* Make and allocate the alias list. */
+/***********************************************************************/
+int CONDFIL::Init(PGLOBAL g, PHC hc)
+{
+ PTOS options = hc->GetTableOptionStruct();
+ char *p, *cn, *cal, *alt = NULL;
+ int rc = RC_OK;
+ bool h;
+
+ if (options)
+ alt = GetListOption(g, "Alias", options->oplist, NULL);
+
+ while (alt) {
+ if (!(p = strchr(alt, '='))) {
+ strcpy(g->Message, "Invalid alias list");
+ rc = RC_FX;
+ break;
+ } // endif !p
+
+ cal = alt; // Alias
+ *p++ = 0;
+
+ if ((h = *p == '*')) {
+ rc = RC_INFO;
+ p++;
+ } // endif h
+
+ cn = p; // Remote column name
+
+ if ((alt = strchr(p, ';')))
+ *alt++ = 0;
+
+ if (*cn == 0)
+ cn = alt;
+
+ Alist = new(g) ALIAS(Alist, cn, cal, h);
+ } // endwhile alt
+
+ return rc;
+} // end of Init
+
+/***********************************************************************/
+/* Check whether a column name is an alias and return the remote name. */
+/***********************************************************************/
+const char *CONDFIL::Chk(const char *fln, bool *h)
+{
+ for (PAL pal = Alist; pal; pal = pal->Next)
+ if (!stricmp(fln, pal->Alias)) {
+ *h = pal->Having;
+ return pal->Name;
+ } // endif fln
+
+ *h = false;
+ return fln;
+} // end of Chk
+
+/* --------------------------- Class EXTDEF -------------------------- */
+
+/***********************************************************************/
+/* EXTDEF Constructor. */
+/***********************************************************************/
+EXTDEF::EXTDEF(void)
+{
+ Tabname = Tabschema = Username = Password = Tabcat = Tabtyp = NULL;
+ Colpat = Srcdef = Qchar = Qrystr = Sep = Phpos = NULL;
+ Options = Cto = Qto = Quoted = Maxerr = Maxres = Memory = 0;
+ Scrollable = Xsrc = false;
+} // end of EXTDEF constructor
+
+/***********************************************************************/
+/* DefineAM: define specific AM block values from XDB file. */
+/***********************************************************************/
+bool EXTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
+{
+ Desc = NULL;
+ Tabname = GetStringCatInfo(g, "Name",
+ (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
+ Tabname = GetStringCatInfo(g, "Tabname", Tabname);
+ Tabschema = GetStringCatInfo(g, "Dbname", NULL);
+ Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
+ Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
+ Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
+ Username = GetStringCatInfo(g, "User", NULL);
+ Password = GetStringCatInfo(g, "Password", NULL);
+
+ if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
+ Read_Only = true;
+
+ Qrystr = GetStringCatInfo(g, "Query_String", "?");
+ Sep = GetStringCatInfo(g, "Separator", NULL);
+//Alias = GetStringCatInfo(g, "Alias", NULL);
+ Phpos = GetStringCatInfo(g, "Phpos", NULL);
+ Xsrc = GetBoolCatInfo("Execsrc", FALSE);
+ Maxerr = GetIntCatInfo("Maxerr", 0);
+ Maxres = GetIntCatInfo("Maxres", 0);
+ Quoted = GetIntCatInfo("Quoted", 0);
+ Options = 0;
+ Cto = 0;
+ Qto = 0;
+
+ if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt)
+ Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch
+
+ if (Catfunc == FNC_COL)
+ Colpat = GetStringCatInfo(g, "Colpat", NULL);
+
+ if (Catfunc == FNC_TABLE)
+ Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
+
+ // Memory was Boolean, it is now integer
+ if (!(Memory = GetIntCatInfo("Memory", 0)))
+ Memory = GetBoolCatInfo("Memory", false) ? 1 : 0;
+
+ Pseudo = 2; // FILID is Ok but not ROWID
+ return false;
+} // end of DefineAM
+
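+// For illustration only (assuming the usual CONNECT option names such as
+// TABLE_TYPE, TABNAME, CONNECTION and OPTION_LIST), a definition whose
+// values are read back by DefineAM could be created with:
+//   CREATE TABLE rememp ENGINE=CONNECT TABLE_TYPE=JDBC
+//     CONNECTION='jdbc:mariadb://localhost/test' TABNAME='emp'
+//     OPTION_LIST='Memory=2,Scrollable=1';
+// Srcdef being left NULL, such a table is not forced to read-only.
+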
+/* ---------------------------TDBEXT class --------------------------- */
+
+/***********************************************************************/
+/* Implementation of the TDBEXT class. */
+/***********************************************************************/
+TDBEXT::TDBEXT(EXTDEF *tdp) : TDB(tdp)
+{
+ Qrp = NULL;
+
+ if (tdp) {
+ TableName = tdp->Tabname;
+ Schema = tdp->Tabschema;
+ User = tdp->Username;
+ Pwd = tdp->Password;
+ Catalog = tdp->Tabcat;
+ Srcdef = tdp->Srcdef;
+ Qrystr = tdp->Qrystr;
+ Sep = tdp->GetSep();
+ Options = tdp->Options;
+ Cto = tdp->Cto;
+ Qto = tdp->Qto;
+ Quoted = MY_MAX(0, tdp->GetQuoted());
+ Rows = tdp->GetElemt();
+ Memory = tdp->Memory;
+ Scrollable = tdp->Scrollable;
+ } else {
+ TableName = NULL;
+ Schema = NULL;
+ User = NULL;
+ Pwd = NULL;
+ Catalog = NULL;
+ Srcdef = NULL;
+ Qrystr = NULL;
+ Sep = 0;
+ Options = 0;
+ Cto = 0;
+ Qto = 0;
+ Quoted = 0;
+ Rows = 0;
+ Memory = 0;
+ Scrollable = false;
+ } // endif tdp
+
+ Quote = NULL;
+ Query = NULL;
+ Count = NULL;
+ //Where = NULL;
+ MulConn = NULL;
+ DBQ = NULL;
+ Qrp = NULL;
+ Fpos = 0;
+ Curpos = 0;
+ AftRows = 0;
+ CurNum = 0;
+ Rbuf = 0;
+ BufSize = 0;
+ Nparm = 0;
+ Ncol = 0;
+ Placed = false;
+} // end of TDBEXT constructor
+
+TDBEXT::TDBEXT(PTDBEXT tdbp) : TDB(tdbp)
+{
+ Qrp = tdbp->Qrp;
+ TableName = tdbp->TableName;
+ Schema = tdbp->Schema;
+ User = tdbp->User;
+ Pwd = tdbp->Pwd;
+ Catalog = tdbp->Catalog;
+ Srcdef = tdbp->Srcdef;
+ Qrystr = tdbp->Qrystr;
+ Sep = tdbp->Sep;
+ Options = tdbp->Options;
+ Cto = tdbp->Cto;
+ Qto = tdbp->Qto;
+ Quoted = tdbp->Quoted;
+ Rows = tdbp->Rows;
+ Memory = tdbp->Memory;
+ Scrollable = tdbp->Scrollable;
+ Quote = tdbp->Quote;
+ Query = tdbp->Query;
+ Count = tdbp->Count;
+ //Where = tdbp->Where;
+ MulConn = tdbp->MulConn;
+ DBQ = tdbp->DBQ;
+ Fpos = 0;
+ Curpos = 0;
+ AftRows = 0;
+ CurNum = 0;
+ Rbuf = 0;
+ BufSize = tdbp->BufSize;
+ Nparm = tdbp->Nparm;
+ Ncol = tdbp->Ncol;
+ Placed = false;
+} // end of TDBEXT copy constructor
+
+/******************************************************************/
+/* Convert a UTF-8 string to latin characters. */
+/******************************************************************/
+int TDBEXT::Decode(char *txt, char *buf, size_t n)
+{
+ uint dummy_errors;
+ uint32 len = copy_and_convert(buf, n, &my_charset_latin1,
+ txt, strlen(txt),
+ &my_charset_utf8_general_ci,
+ &dummy_errors);
+ buf[len] = '\0';
+ return 0;
+} // end of Decode
+
+/***********************************************************************/
+/* MakeSQL: make the SQL statement used with the remote connection. */
+/* TODO: when implementing remote filtering, column only used in */
+/* local filter should be removed from column list. */
+/***********************************************************************/
+bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
+{
+ char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3];
+ int len;
+ bool oom = false, first = true;
+ PTABLE tablep = To_Table;
+ PCOL colp;
+
+ if (Srcdef) {
+ if ((catp = strstr(Srcdef, "%s"))) {
+ char *fil1, *fil2;
+ PSZ ph = ((EXTDEF*)To_Def)->Phpos;
+
+ if (!ph)
+ ph = (strstr(catp + 2, "%s")) ? const_cast<char*>("WH") :
+ const_cast<char*>("W");
+
+ if (stricmp(ph, "H")) {
+ fil1 = (To_CondFil && *To_CondFil->Body)
+ ? To_CondFil->Body : PlugDup(g, "1=1");
+ } // endif ph
+
+ if (stricmp(ph, "W")) {
+ fil2 = (To_CondFil && To_CondFil->Having && *To_CondFil->Having)
+ ? To_CondFil->Having : PlugDup(g, "1=1");
+ } // endif ph
+
+ if (!stricmp(ph, "W")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil1));
+ } else if (!stricmp(ph, "WH")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil1, fil2));
+ } else if (!stricmp(ph, "H")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil2));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2));
+ } else if (!stricmp(ph, "HW")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2, fil1));
+ } else {
+ strcpy(g->Message, "MakeSQL: Wrong place holders specification");
+ return true;
+ } // endif's ph
+
+ } else
+ Query = new(g)STRING(g, 0, Srcdef);
+
+ return false;
+ } // endif Srcdef
+
+ // Allocate the string used to contain the Query
+ Query = new(g)STRING(g, 1023, "SELECT ");
+
+ if (!cnt) {
+ if (Columns) {
+ // Normal SQL statement to retrieve results
+ for (colp = Columns; colp; colp = colp->GetNext())
+ if (!colp->IsSpecial()) {
+ if (!first)
+ oom |= Query->Append(", ");
+ else
+ first = false;
+
+ // Column name can be encoded in UTF-8
+ Decode(colp->GetName(), buf, sizeof(buf));
+
+ if (Quote) {
+ // Put column name between identifier quotes in case it contains blanks
+ oom |= Query->Append(Quote);
+ oom |= Query->Append(buf);
+ oom |= Query->Append(Quote);
+ } else
+ oom |= Query->Append(buf);
+
+ ((PEXTCOL)colp)->SetRank(++Ncol);
+ } // endif colp
+
+ } else
+ // !Columns can occur for queries such as SELECT count(*) FROM...
+ // for which we will count the rows of SELECT * FROM...
+ oom |= Query->Append('*');
+
+ } else
+ // SQL statement used to retrieve the size of the result
+ oom |= Query->Append("count(*)");
+
+ oom |= Query->Append(" FROM ");
+
+ if (Catalog && *Catalog)
+ catp = Catalog;
+
+ //if (tablep->GetSchema())
+ // schmp = (char*)tablep->GetSchema();
+ //else
+ if (Schema && *Schema)
+ schmp = Schema;
+
+ if (catp) {
+ oom |= Query->Append(catp);
+
+ if (schmp) {
+ oom |= Query->Append('.');
+ oom |= Query->Append(schmp);
+ } // endif schmp
+
+ oom |= Query->Append('.');
+ } else if (schmp) {
+ oom |= Query->Append(schmp);
+ oom |= Query->Append('.');
+ } // endif schmp
+
+ // Table name can be encoded in UTF-8
+ Decode(TableName, buf, sizeof(buf));
+
+ if (Quote) {
+ // Put table name between identifier quotes in case it contains blanks
+ oom |= Query->Append(Quote);
+ oom |= Query->Append(buf);
+ oom |= Query->Append(Quote);
+ } else
+ oom |= Query->Append(buf);
+
+ len = Query->GetLength();
+
+ if (To_CondFil) {
+ if (Mode == MODE_READ) {
+ oom |= Query->Append(" WHERE ");
+ oom |= Query->Append(To_CondFil->Body);
+ len = Query->GetLength() + 1;
+ } else
+ len += (strlen(To_CondFil->Body) + 256);
+
+ } else
+ len += ((Mode == MODE_READX) ? 256 : 1);
+
+ if (oom || Query->Resize(len)) {
+ strcpy(g->Message, "MakeSQL: Out of memory");
+ return true;
+ } // endif oom
+
+ if (trace)
+ htrc("Query=%s\n", Query->GetStr());
+
+ return false;
+} // end of MakeSQL
+
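+// Worked example (illustrative names): for columns a and b, Quote="`",
+// Schema="db" and TableName="emp", the code above builds
+//   SELECT `a`, `b` FROM db.`emp`
+// (a WHERE clause is appended when a condition filter was pushed down in
+// read mode) and, when cnt is true,
+//   SELECT count(*) FROM db.`emp`
+// With Srcdef="select * from emp where %s" and no having place holder,
+// the %s is replaced by the filter body, or by "1=1" when no filter
+// was pushed down.
+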
+/***********************************************************************/
+/* MakeCommand: make the Update or Delete statement to send to the */
+/* remote data source. Limited to remote values and filtering. */
+/***********************************************************************/
+bool TDBEXT::MakeCommand(PGLOBAL g)
+{
+ char *p, *stmt, name[68], *body = NULL;
+ char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1);
+ bool qtd = Quoted > 0;
+ int i = 0, k = 0;
+
+ // Make a lower case copy of the original query and change
+ // back ticks to the data source identifier quoting character
+ do {
+ qrystr[i] = (Qrystr[i] == '`') ? *Quote : tolower(Qrystr[i]);
+ } while (Qrystr[i++]);
+
+ if (To_CondFil && (p = strstr(qrystr, " where "))) {
+ p[7] = 0; // Remove where clause
+ Qrystr[(p - qrystr) + 7] = 0;
+ body = To_CondFil->Body;
+ stmt = (char*)PlugSubAlloc(g, NULL, strlen(qrystr)
+ + strlen(body) + 64);
+ } else
+ stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64);
+
+ // Check whether the table name is equal to a keyword
+ // If so, it must be quoted in the original query
+ strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
+
+ if (strstr(" update delete low_priority ignore quick from ", name)) {
+ strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote));
+ k += 2;
+ } else
+ strlwr(strcpy(name, Name)); // Not a keyword
+
+ if ((p = strstr(qrystr, name))) {
+ for (i = 0; i < p - qrystr; i++)
+ stmt[i] = (Qrystr[i] == '`') ? *Quote : Qrystr[i];
+
+ stmt[i] = 0;
+ k += i + (int)strlen(Name);
+
+ if (qtd && *(p - 1) == ' ')
+ strcat(strcat(strcat(stmt, Quote), TableName), Quote);
+ else
+ strcat(stmt, TableName);
+
+ i = (int)strlen(stmt);
+
+ do {
+ stmt[i++] = (Qrystr[k] == '`') ? *Quote : Qrystr[k];
+ } while (Qrystr[k++]);
+
+ if (body)
+ strcat(stmt, body);
+
+ } else {
+ sprintf(g->Message, "Cannot use this %s command",
+ (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
+ return true;
+ } // endif p
+
+ if (trace)
+ htrc("Command=%s\n", stmt);
+
+ Query = new(g)STRING(g, 0, stmt);
+ return (!Query->GetSize());
+} // end of MakeCommand
+
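+// Worked example (illustrative names): with Quote set to the double quote
+// character, TableName="emp" and the original query
+//   UPDATE `emp` SET sal=sal*1.1 WHERE dept=10
+// the code above produces, assuming the pushed-down filter body is dept=10,
+//   UPDATE "emp" SET sal=sal*1.1 WHERE dept=10
+// i.e. back ticks are replaced by the data source quoting character, the
+// local table name by the remote TableName, and the WHERE clause by the
+// condition filter body.
+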
+/***********************************************************************/
+/* GetRecpos: return the position of last read record. */
+/***********************************************************************/
+int TDBEXT::GetRecpos(void)
+{
+ return Fpos;
+} // end of GetRecpos
+
+/***********************************************************************/
+/* GetMaxSize: returns the table size estimate in number of lines. */
+/***********************************************************************/
+int TDBEXT::GetMaxSize(PGLOBAL g)
+{
+ if (MaxSize < 0) {
+ if (Mode == MODE_DELETE)
+ // Return 0 in mode DELETE in case of delete all.
+ MaxSize = 0;
+ else if (!Cardinality(NULL))
+ MaxSize = 10; // To make MySQL happy
+ else if ((MaxSize = Cardinality(g)) < 0)
+ MaxSize = 12; // So we can see an error occurred
+
+ } // endif MaxSize
+
+ return MaxSize;
+} // end of GetMaxSize
+
+/***********************************************************************/
+/* Return max size value. */
+/***********************************************************************/
+int TDBEXT::GetProgMax(PGLOBAL g)
+{
+ return GetMaxSize(g);
+} // end of GetProgMax
+
+/* ---------------------------EXTCOL class --------------------------- */
+
+/***********************************************************************/
+/* EXTCOL public constructor. */
+/***********************************************************************/
+EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+ : COLBLK(cdp, tdbp, i)
+{
+ if (cprec) {
+ Next = cprec->GetNext();
+ cprec->SetNext(this);
+ } else {
+ Next = tdbp->GetColumns();
+ tdbp->SetColumns(this);
+ } // endif cprec
+
+ if (trace)
+ htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this);
+
+ // Set additional remote access method information for column.
+ Crp = NULL;
+ Long = Precision;
+ To_Val = NULL;
+ Bufp = NULL;
+ Blkp = NULL;
+ Rank = 0; // Not known yet
+} // end of EXTCOL constructor
+
+/***********************************************************************/
+/* EXTCOL private constructor. */
+/***********************************************************************/
+EXTCOL::EXTCOL(void) : COLBLK()
+{
+ Crp = NULL;
+ Buf_Type = TYPE_INT; // This is a count(*) column
+
+ // Set additional Dos access method information for column.
+ Long = sizeof(int);
+ To_Val = NULL;
+ Bufp = NULL;
+ Blkp = NULL;
+ Rank = 1;
+} // end of EXTCOL constructor
+
+/***********************************************************************/
+/* EXTCOL constructor used for copying columns. */
+/* tdbp is the pointer to the new table descriptor. */
+/***********************************************************************/
+EXTCOL::EXTCOL(PEXTCOL col1, PTDB tdbp) : COLBLK(col1, tdbp)
+{
+ Crp = col1->Crp;
+ Long = col1->Long;
+ To_Val = col1->To_Val;
+ Bufp = col1->Bufp;
+ Blkp = col1->Blkp;
+ Rank = col1->Rank;
+} // end of EXTCOL copy constructor
+
+/***********************************************************************/
+/* SetBuffer: prepare a column block for write operation. */
+/***********************************************************************/
+bool EXTCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
+{
+ if (!(To_Val = value)) {
+ sprintf(g->Message, MSG(VALUE_ERROR), Name);
+ return true;
+ } else if (Buf_Type == value->GetType()) {
+ // Values are of the (good) column type
+ if (Buf_Type == TYPE_DATE) {
+ // If any of the date values is formatted
+ // output format must be set for the receiving table
+ if (GetDomain() || ((DTVAL *)value)->IsFormatted())
+ goto newval; // This will make a new value;
+
+ } else if (Buf_Type == TYPE_DOUBLE)
+ // Float values must be written with the correct (column) precision
+ // Note: maybe this should be forced by ShowValue instead of this ?
+ value->SetPrec(GetScale());
+
+ Value = value; // Directly access the external value
+ } else {
+ // Values are not of the (good) column type
+ if (check) {
+ sprintf(g->Message, MSG(TYPE_VALUE_ERR), Name,
+ GetTypeName(Buf_Type), GetTypeName(value->GetType()));
+ return true;
+ } // endif check
+
+ newval:
+ if (InitValue(g)) // Allocate the matching value block
+ return true;
+
+ } // endif's Value, Buf_Type
+
+ // Because Colblk's have been made from a copy of the original TDB in
+ // case of Update, we must reset them to point to the original one.
+ if (To_Tdb->GetOrig())
+ To_Tdb = (PTDB)To_Tdb->GetOrig();
+
+ // Set the Column
+ Status = (ok) ? BUF_EMPTY : BUF_NO;
+ return false;
+} // end of SetBuffer
+
diff --git a/storage/connect/tabext.h b/storage/connect/tabext.h
new file mode 100644
index 00000000000..2ef20c89f2c
--- /dev/null
+++ b/storage/connect/tabext.h
@@ -0,0 +1,200 @@
+/*************** Tabext H Declares Source Code File (.H) ***************/
+/* Name: TABEXT.H Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* */
+/* This is the EXTDEF, TABEXT and EXTCOL classes definitions. */
+/***********************************************************************/
+
+#ifndef __TABEXT_H
+#define __TABEXT_H
+
+#include "reldef.h"
+
+typedef class ALIAS *PAL;
+
+class ALIAS : public BLOCK {
+ public:
+ ALIAS(PAL x, PSZ n, PSZ a, bool h)
+ {Next = x, Name = n, Alias = a, Having = h;}
+
+ PAL Next;
+ PSZ Name;
+ PSZ Alias;
+ bool Having;
+}; // end of class ALIAS
+
+// Condition filter structure
+class CONDFIL : public BLOCK {
+ public:
+ // Constructor
+ CONDFIL(const Item *cond, uint idx, AMT type);
+
+ // Functions
+ int Init(PGLOBAL g, PHC hc);
+ const char *Chk(const char *cln, bool *h);
+
+ // Members
+ const Item *Cond;
+ AMT Type;
+ uint Idx;
+ OPVAL Op;
+ PCMD Cmds;
+ PAL Alist;
+ bool All;
+ bool Bd;
+ bool Hv;
+ char *Body;
+ char *Having;
+}; // end of class CONDFIL
+
+/***********************************************************************/
+/* This class corresponds to the data base description for external */
+/* tables of type MYSQL, ODBC, JDBC... */
+/***********************************************************************/
+class DllExport EXTDEF : public TABDEF { /* EXT table */
+ friend class TDBEXT;
+public:
+ // Constructor
+ EXTDEF(void); // Constructor
+
+ // Implementation
+ virtual const char *GetType(void) { return "EXT"; }
+ inline PSZ GetTabname(void) { return Tabname; }
+ inline PSZ GetTabschema(void) { return Tabschema; }
+ inline PSZ GetUsername(void) { return Username; };
+ inline PSZ GetPassword(void) { return Password; };
+ inline PSZ GetTabcat(void) { return Tabcat; }
+ inline PSZ GetSrcdef(void) { return Srcdef; }
+ inline char GetSep(void) { return (Sep) ? *Sep : 0; }
+ inline int GetQuoted(void) { return Quoted; }
+ inline int GetOptions(void) { return Options; }
+
+ // Methods
+ virtual int Indexable(void) { return 2; }
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+
+protected:
+ // Members
+ PSZ Tabname; /* External table name */
+ PSZ Tabschema; /* External table schema */
+ PSZ Username; /* User connect name */
+ PSZ Password; /* Password connect info */
+ PSZ Tabcat; /* External table catalog */
+ PSZ Tabtyp; /* Catalog table type */
+ PSZ Colpat; /* Catalog column pattern */
+ PSZ Srcdef; /* The source table SQL definition */
+ PSZ Qchar; /* Identifier quoting character */
+ PSZ Qrystr; /* The original query */
+ PSZ Sep; /* Decimal separator */
+//PSZ Alias; /* Column alias list */
+ PSZ Phpos; /* Place holder positions */
+ int Options; /* Open connection options */
+ int Cto; /* Open connection timeout */
+ int Qto; /* Query (command) timeout */
+ int Quoted; /* Identifier quoting level */
+ int Maxerr; /* Maxerr for an Exec table */
+ int Maxres; /* Maxres for a catalog table */
+ int Memory; /* Put result set in memory */
+ bool Scrollable; /* Use scrollable cursor */
+ bool Xsrc; /* Execution type */
+}; // end of EXTDEF
+
+/***********************************************************************/
+/* This is the base class for all external tables. */
+/***********************************************************************/
+class DllExport TDBEXT : public TDB {
+public:
+ // Constructors
+ TDBEXT(EXTDEF *tdp);
+ TDBEXT(PTDBEXT tdbp);
+
+ // Implementation
+
+ // Properties
+ virtual bool IsRemote(void) { return true; }
+
+ // Methods
+ virtual PSZ GetServer(void) { return "Remote"; }
+ virtual int GetRecpos(void);
+
+ // Database routines
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual int GetProgMax(PGLOBAL g);
+
+protected:
+ // Internal functions
+ virtual bool MakeSQL(PGLOBAL g, bool cnt);
+ //virtual bool MakeInsert(PGLOBAL g);
+ virtual bool MakeCommand(PGLOBAL g);
+ int Decode(char *utf, char *buf, size_t n);
+
+ // Members
+ PQRYRES Qrp; // Points to storage result
+ PSTRG Query; // Constructed SQL query
+ char *TableName; // Points to the external table name
+ char *Schema; // Points to the external table Schema
+ char *User; // User connect info
+ char *Pwd; // Password connect info
+ char *Catalog; // Points to the external table Catalog
+ char *Srcdef; // The source table SQL definition
+ char *Count; // Points to count(*) SQL statement
+ //char *Where; // Points to local where clause
+ char *Quote; // The identifier quoting character
+ char *MulConn; // Used for multiple external tables
+ char *DBQ; // The address part of Connect string
+ char *Qrystr; // The original query
+ char Sep; // The decimal separator
+ int Options; // Connect options
+ int Cto; // Connect timeout
+ int Qto; // Query timeout
+ int Quoted; // The identifier quoting level
+ int Fpos; // Position of last read record
+ int Curpos; // Cursor position of last fetch
+ int AftRows; // The number of affected rows
+ int Rows; // Rowset size
+ int CurNum; // Current buffer line number
+ int Rbuf; // Number of lines read in buffer
+ int BufSize; // Size of connect string buffer
+ int Nparm; // The number of statement parameters
+ int Memory; // 0: No 1: Alloc 2: Put 3: Get
+ int Ncol; // The column number (JDBC)
+ bool Scrollable; // Use scrollable cursor
+ bool Placed; // True for position reading
+}; // end of class TDBEXT
+
+/***********************************************************************/
+/* Virtual class EXTCOL: external column. */
+/***********************************************************************/
+class DllExport EXTCOL : public COLBLK {
+ friend class TDBEXT;
+public:
+ // Constructor
+ EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am);
+ EXTCOL(PEXTCOL colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+ inline int GetRank(void) { return Rank; }
+ inline void SetRank(int k) { Rank = k; }
+ //inline PVBLK GetBlkp(void) {return Blkp;}
+ inline void SetCrp(PCOLRES crp) { Crp = crp; }
+
+ // Methods
+ virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ virtual void ReadColumn(PGLOBAL) = 0;
+ virtual void WriteColumn(PGLOBAL) = 0;
+
+protected:
+ // Constructor for count(*) column
+ EXTCOL(void);
+
+ // Members
+ PCOLRES Crp; // To storage result
+ void *Bufp; // To extended buffer
+ PVBLK Blkp; // To Value Block
+ PVAL To_Val; // To value used for Insert
+ int Rank; // Rank (position) number in the query
+ //int Flag; // ???
+}; // end of class EXTCOL
+
+#endif // __TABEXT_H
diff --git a/storage/connect/tabfix.cpp b/storage/connect/tabfix.cpp
index d99f7800f26..bf123cd36c8 100644
--- a/storage/connect/tabfix.cpp
+++ b/storage/connect/tabfix.cpp
@@ -77,7 +77,7 @@ TDBFIX::TDBFIX(PGLOBAL g, PTDBFIX tdbp) : TDBDOS(g, tdbp)
} // end of TDBFIX copy constructor
// Method
-PTDB TDBFIX::CopyOne(PTABS t)
+PTDB TDBFIX::Clone(PTABS t)
{
PTDB tp;
PGLOBAL g = t->G;
@@ -105,7 +105,7 @@ PTDB TDBFIX::CopyOne(PTABS t)
} // endif Ftype
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Reset read/write position values. */
diff --git a/storage/connect/tabfix.h b/storage/connect/tabfix.h
index 49956ba0711..4b9f9689992 100644
--- a/storage/connect/tabfix.h
+++ b/storage/connect/tabfix.h
@@ -34,7 +34,7 @@ class DllExport TDBFIX : public TDBDOS {
{return (PTDB)new(g) TDBFIX(g, this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual void ResetDB(void);
virtual bool IsUsingTemp(PGLOBAL g);
virtual int RowNumber(PGLOBAL g, bool b = false);
diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp
index b24375443f6..0da67ef5e7f 100644
--- a/storage/connect/tabfmt.cpp
+++ b/storage/connect/tabfmt.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2001 - 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2001 - 2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -98,8 +98,9 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
int num_read = 0, num_max = 10000000; // Statistics
int len[MAXCOL], typ[MAXCOL], prc[MAXCOL];
PCSVDEF tdp;
- PTDBCSV tdbp;
- PQRYRES qrp;
+ PTDBCSV tcvp;
+ PTDBASE tdbp;
+ PQRYRES qrp;
PCOLRES crp;
if (info) {
@@ -108,10 +109,10 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
goto skipit;
} // endif info
- if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
- strcpy(g->Message, "Cannot find column definition for multiple table");
- return NULL;
- } // endif Multiple
+ //if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
+ // strcpy(g->Message, "Cannot find column definition for multiple table");
+ // return NULL;
+ //} // endif Multiple
// num_max = atoi(p+1); // Max num of record to test
imax = hmax = nerr = 0;
@@ -127,10 +128,20 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
/* Get the CSV table description block. */
/*********************************************************************/
tdp = new(g) CSVDEF;
+ tdp->Database = dp;
+
+ if ((tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false))) {
#if defined(ZIP_SUPPORT)
- tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL);
- tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false);
-#endif // ZIP_SUPPORT
+ tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL);
+ tdp->Mulentries = (tdp->Entry)
+ ? strchr(tdp->Entry, '*') || strchr(tdp->Entry, '?')
+ : GetBooleanTableOption(g, topt, "Mulentries", false);
+#else // !ZIP_SUPPORT
+ strcpy(g->Message, "ZIP not supported by this version");
+ return NULL;
+#endif // !ZIP_SUPPORT
+ } // endif // Zipped
+
fn = tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL);
if (!tdp->Fn) {
@@ -141,6 +152,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
if (!(tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))
tdp->Lrecl = 4096;
+ tdp->Multiple = GetIntegerTableOption(g, topt, "Multiple", 0);
p = GetStringTableOption(g, topt, "Separator", ",");
tdp->Sep = (strlen(p) == 2 && p[0] == '\\' && p[1] == 't') ? '\t' : *p;
@@ -177,17 +189,18 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
htrc("File %s Sep=%c Qot=%c Header=%d maxerr=%d\n",
SVP(tdp->Fn), tdp->Sep, tdp->Qot, tdp->Header, tdp->Maxerr);
- if (tdp->Zipped) {
-#if defined(ZIP_SUPPORT)
- tdbp = new(g)TDBCSV(tdp, new(g)ZIPFAM(tdp));
-#else // !ZIP_SUPPORT
- sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
- return NULL;
-#endif // !ZIP_SUPPORT
- } else
- tdbp = new(g) TDBCSV(tdp, new(g) DOSFAM(tdp));
+ if (tdp->Zipped)
+ tcvp = new(g)TDBCSV(tdp, new(g)UNZFAM(tdp));
+ else
+ tcvp = new(g) TDBCSV(tdp, new(g) DOSFAM(tdp));
+
+ tcvp->SetMode(MODE_READ);
- tdbp->SetMode(MODE_READ);
+ if (tdp->Multiple) {
+ tdbp = new(g)TDBMUL(tcvp);
+ tdbp->SetMode(MODE_READ);
+ } else
+ tdbp = tcvp;
/*********************************************************************/
/* Open the CSV file. */
@@ -202,7 +215,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
phase = 0;
if ((rc = tdbp->ReadDB(g)) == RC_OK) {
- p = PlgDBDup(g, tdbp->To_Line);
+ p = PlgDBDup(g, tcvp->To_Line);
//skip leading blanks
for (; *p == ' '; p++) ;
@@ -245,6 +258,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
for (i = 0; i < hmax; i++)
length[0] = MY_MAX(length[0], strlen(colname[i]));
+ tcvp->Header = true; // In case of multiple table
} // endif hdr
for (num_read++; num_read <= num_max; num_read++) {
@@ -265,7 +279,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
/*******************************************************************/
i = n = phase = blank = digit = dec = 0;
- for (p = tdbp->To_Line; *p; p++)
+ for (p = tcvp->To_Line; *p; p++)
if (*p == sep) {
if (phase != 1) {
if (i == MAXCOL - 1) {
@@ -503,7 +517,14 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode)
/*******************************************************************/
if (Zipped) {
#if defined(ZIP_SUPPORT)
- txfp = new(g) ZIPFAM(this);
+ if (mode == MODE_READ || mode == MODE_ANY) {
+ txfp = new(g) UNZFAM(this);
+ } else if (mode == MODE_INSERT) {
+ txfp = new(g) ZIPFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's mode
#else // !ZIP_SUPPORT
strcpy(g->Message, "ZIP not supported");
return NULL;
@@ -640,7 +661,7 @@ TDBCSV::TDBCSV(PGLOBAL g, PTDBCSV tdbp) : TDBDOS(g, tdbp)
} // end of TDBCSV copy constructor
// Method
-PTDB TDBCSV::CopyOne(PTABS t)
+PTDB TDBCSV::Clone(PTABS t)
{
PTDB tp;
PCSVCOL cp1, cp2;
@@ -654,7 +675,7 @@ PTDB TDBCSV::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate CSV column description block. */
@@ -1148,7 +1169,7 @@ TDBFMT::TDBFMT(PGLOBAL g, PTDBFMT tdbp) : TDBCSV(g, tdbp)
} // end of TDBFMT copy constructor
// Method
-PTDB TDBFMT::CopyOne(PTABS t)
+PTDB TDBFMT::Clone(PTABS t)
{
PTDB tp;
PCSVCOL cp1, cp2;
@@ -1165,7 +1186,7 @@ PTDB TDBFMT::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate FMT column description block. */
diff --git a/storage/connect/tabfmt.h b/storage/connect/tabfmt.h
index 5ce8d399a64..e5655435be7 100644
--- a/storage/connect/tabfmt.h
+++ b/storage/connect/tabfmt.h
@@ -52,6 +52,7 @@ public:
/***********************************************************************/
class DllExport TDBCSV : public TDBDOS {
friend class CSVCOL;
+ friend class MAPFAM;
friend PQRYRES CSVColumns(PGLOBAL, char *, PTOS, bool);
public:
// Constructor
@@ -64,7 +65,7 @@ public:
{return (PTDB)new(g) TDBCSV(g, this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
//virtual bool IsUsingTemp(PGLOBAL g);
virtual int GetBadLines(void) {return (int)Nerr;}
@@ -147,7 +148,7 @@ class DllExport TDBFMT : public TDBCSV {
{return (PTDB)new(g) TDBFMT(g, this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp
index 912e6c7d530..5431e35e0ec 100644
--- a/storage/connect/tabjdbc.cpp
+++ b/storage/connect/tabjdbc.cpp
@@ -1,11 +1,11 @@
/************* TabJDBC C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: TABJDBC */
/* ------------- */
-/* Version 1.1 */
+/* Version 1.2 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -69,9 +69,10 @@
#include "plgdbsem.h"
#include "mycat.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabjdbc.h"
#include "tabmul.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "tabcol.h"
#include "valblk.h"
#include "ha_connect.h"
@@ -96,10 +97,7 @@ bool ExactInfo(void);
/***********************************************************************/
JDBCDEF::JDBCDEF(void)
{
- Driver = Url = Wrapname =Tabname = Tabschema = Username = Colpat = NULL;
- Password = Tabcat = Tabtype = Srcdef = Qchar = Qrystr = Sep = NULL;
- Options = Quoted = Maxerr = Maxres = Memory = 0;
- Scrollable = Xsrc = false;
+ Driver = Url = Wrapname = NULL;
} // end of JDBCDEF constructor
/***********************************************************************/
@@ -134,23 +132,26 @@ bool JDBCDEF::SetParms(PJPARM sjp)
int JDBCDEF::ParseURL(PGLOBAL g, char *url, bool b)
{
if (strncmp(url, "jdbc:", 5)) {
+ PSZ p;
+
// No "jdbc:" in connection string. Must be a straight
// "server" or "server/table"
// ok, so we do a little parsing, but not completely!
- if ((Tabname= strchr(url, '/'))) {
+ if ((p = strchr(url, '/'))) {
// If there is a single '/' in the connection string,
// this means the user is specifying a table name
- *Tabname++= '\0';
+ *p++= '\0';
// there better not be any more '/'s !
- if (strchr(Tabname, '/'))
+ if (strchr(p, '/'))
return RC_FX;
- } else if (b) {
- // Otherwise, straight server name,
- Tabname = GetStringCatInfo(g, "Name", NULL);
- Tabname = GetStringCatInfo(g, "Tabname", Tabname);
- } // endelse
+ Tabname = p;
+// } else if (b) {
+// // Otherwise, straight server name,
+// Tabname = GetStringCatInfo(g, "Name", NULL);
+// Tabname = GetStringCatInfo(g, "Tabname", Tabname);
+ } // endif
if (trace)
htrc("server: %s Tabname: %s", url, Tabname);
@@ -204,6 +205,9 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
int rc = RC_OK;
+ if (EXTDEF::DefineAM(g, am, poff))
+ return true;
+
Driver = GetStringCatInfo(g, "Driver", NULL);
Desc = Url = GetStringCatInfo(g, "Connect", NULL);
@@ -223,41 +227,41 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
if (rc == RC_FX) // Error
return true;
- else if (rc == RC_OK) { // Url was not a server name
- Tabname = GetStringCatInfo(g, "Name",
- (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
- Tabname = GetStringCatInfo(g, "Tabname", Tabname);
- Username = GetStringCatInfo(g, "User", NULL);
- Password = GetStringCatInfo(g, "Password", NULL);
- } // endif rc
+//else if (rc == RC_OK) { // Url was not a server name
+// Tabname = GetStringCatInfo(g, "Name",
+// (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
+// Tabname = GetStringCatInfo(g, "Tabname", Tabname);
+// Username = GetStringCatInfo(g, "User", NULL);
+// Password = GetStringCatInfo(g, "Password", NULL);
+//} // endif rc
- if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
- Read_Only = true;
+//if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
+// Read_Only = true;
Wrapname = GetStringCatInfo(g, "Wrapper", NULL);
//Prop = GetStringCatInfo(g, "Properties", NULL);
- Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
- Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
- Tabschema = GetStringCatInfo(g, "Dbname", NULL);
- Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
-
- if (Catfunc == FNC_COL)
- Colpat = GetStringCatInfo(g, "Colpat", NULL);
-
- if (Catfunc == FNC_TABLE)
- Tabtype = GetStringCatInfo(g, "Tabtype", NULL);
-
- Qrystr = GetStringCatInfo(g, "Query_String", "?");
- Sep = GetStringCatInfo(g, "Separator", NULL);
- Xsrc = GetBoolCatInfo("Execsrc", FALSE);
- Maxerr = GetIntCatInfo("Maxerr", 0);
- Maxres = GetIntCatInfo("Maxres", 0);
- Quoted = GetIntCatInfo("Quoted", 0);
-//Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT);
-//Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
- Scrollable = GetBoolCatInfo("Scrollable", false);
- Memory = GetIntCatInfo("Memory", 0);
- Pseudo = 2; // FILID is Ok but not ROWID
+//Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
+//Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
+//Tabschema = GetStringCatInfo(g, "Dbname", NULL);
+//Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
+
+//if (Catfunc == FNC_COL)
+// Colpat = GetStringCatInfo(g, "Colpat", NULL);
+
+//if (Catfunc == FNC_TABLE)
+// Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
+
+//Qrystr = GetStringCatInfo(g, "Query_String", "?");
+//Sep = GetStringCatInfo(g, "Separator", NULL);
+//Xsrc = GetBoolCatInfo("Execsrc", FALSE);
+//Maxerr = GetIntCatInfo("Maxerr", 0);
+//Maxres = GetIntCatInfo("Maxres", 0);
+//Quoted = GetIntCatInfo("Quoted", 0);
+// Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT);
+// Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
+//Scrollable = GetBoolCatInfo("Scrollable", false);
+//Memory = GetIntCatInfo("Memory", 0);
+//Pseudo = 2; // FILID is Ok but not ROWID
return false;
} // end of DefineAM
@@ -266,7 +270,7 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
/***********************************************************************/
PTDB JDBCDEF::GetTable(PGLOBAL g, MODE m)
{
- PTDBASE tdbp = NULL;
+ PTDB tdbp = NULL;
/*********************************************************************/
/* Allocate a TDB of the proper type. */
@@ -326,7 +330,7 @@ int JDBCPARM::CheckSize(int rows)
/***********************************************************************/
/* Implementation of the TDBJDBC class. */
/***********************************************************************/
-TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBASE(tdp)
+TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBEXT(tdp)
{
Jcp = NULL;
Cnp = NULL;
@@ -335,101 +339,45 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBASE(tdp)
Ops.Driver = tdp->Driver;
Ops.Url = tdp->Url;
WrapName = tdp->Wrapname;
- TableName = tdp->Tabname;
- Schema = tdp->Tabschema;
Ops.User = tdp->Username;
Ops.Pwd = tdp->Password;
// Ops.Properties = tdp->Prop;
- Catalog = tdp->Tabcat;
- Srcdef = tdp->Srcdef;
- Qrystr = tdp->Qrystr;
- Sep = tdp->GetSep();
- Options = tdp->Options;
// Ops.Cto = tdp->Cto;
// Ops.Qto = tdp->Qto;
- Quoted = MY_MAX(0, tdp->GetQuoted());
- Rows = tdp->GetElemt();
- Memory = tdp->Memory;
Ops.Scrollable = tdp->Scrollable;
} else {
WrapName = NULL;
- TableName = NULL;
- Schema = NULL;
Ops.Driver = NULL;
Ops.Url = NULL;
Ops.User = NULL;
Ops.Pwd = NULL;
// Ops.Properties = NULL;
- Catalog = NULL;
- Srcdef = NULL;
- Qrystr = NULL;
- Sep = 0;
- Options = 0;
// Ops.Cto = DEFAULT_LOGIN_TIMEOUT;
// Ops.Qto = DEFAULT_QUERY_TIMEOUT;
- Quoted = 0;
- Rows = 0;
- Memory = 0;
Ops.Scrollable = false;
} // endif tdp
- Quote = NULL;
- Query = NULL;
- Count = NULL;
-//Where = NULL;
- MulConn = NULL;
- DBQ = NULL;
- Qrp = NULL;
- Fpos = 0;
- Curpos = 0;
- AftRows = 0;
- CurNum = 0;
- Rbuf = 0;
- BufSize = 0;
- Ncol = 0;
- Nparm = 0;
- Placed = false;
+//Ncol = 0;
Prepared = false;
Werr = false;
Rerr = false;
Ops.Fsize = Ops.CheckSize(Rows);
} // end of TDBJDBC standard constructor
-TDBJDBC::TDBJDBC(PTDBJDBC tdbp) : TDBASE(tdbp)
+TDBJDBC::TDBJDBC(PTDBJDBC tdbp) : TDBEXT(tdbp)
{
Jcp = tdbp->Jcp; // is that right ?
Cnp = tdbp->Cnp;
WrapName = tdbp->WrapName;
- TableName = tdbp->TableName;
- Schema = tdbp->Schema;
Ops = tdbp->Ops;
- Catalog = tdbp->Catalog;
- Srcdef = tdbp->Srcdef;
- Qrystr = tdbp->Qrystr;
- Memory = tdbp->Memory;
-//Scrollable = tdbp->Scrollable;
- Quote = tdbp->Quote;
- Query = tdbp->Query;
- Count = tdbp->Count;
-//Where = tdbp->Where;
- MulConn = tdbp->MulConn;
- DBQ = tdbp->DBQ;
- Options = tdbp->Options;
- Quoted = tdbp->Quoted;
- Rows = tdbp->Rows;
- Fpos = 0;
- Curpos = 0;
- AftRows = 0;
- CurNum = 0;
- Rbuf = 0;
- BufSize = tdbp->BufSize;
- Nparm = tdbp->Nparm;
- Qrp = tdbp->Qrp;
- Placed = false;
+//Ncol = tdbp->Ncol;
+ Prepared = tdbp->Prepared;
+ Werr = tdbp->Werr;
+ Rerr = tdbp->Rerr;
} // end of TDBJDBC copy constructor
// Method
-PTDB TDBJDBC::CopyOne(PTABS t)
+PTDB TDBJDBC::Clone(PTABS t)
{
PTDB tp;
PJDBCCOL cp1, cp2;
@@ -443,7 +391,7 @@ PTDB TDBJDBC::CopyOne(PTABS t)
} // endfor cp1
return tp;
-} // end of CopyOne
+} // end of Clone
/***********************************************************************/
/* Allocate JDBC column description block. */
@@ -453,134 +401,6 @@ PCOL TDBJDBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
return new(g)JDBCCOL(cdp, this, cprec, n);
} // end of MakeCol
-/******************************************************************/
-/* Convert an UTF-8 string to latin characters. */
-/******************************************************************/
-int TDBJDBC::Decode(char *txt, char *buf, size_t n)
-{
- uint dummy_errors;
- uint32 len= copy_and_convert(buf, n, &my_charset_latin1,
- txt, strlen(txt),
- &my_charset_utf8_general_ci,
- &dummy_errors);
- buf[len]= '\0';
- return 0;
-} // end of Decode
-
-/***********************************************************************/
-/* MakeSQL: make the SQL statement use with JDBC connection. */
-/* TODO: when implementing EOM filtering, column only used in local */
-/* filter should be removed from column list. */
-/***********************************************************************/
-bool TDBJDBC::MakeSQL(PGLOBAL g, bool cnt)
-{
- char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3];
- int len;
- bool oom = false, first = true;
- PTABLE tablep = To_Table;
- PCOL colp;
-
- if (Srcdef) {
- Query = new(g)STRING(g, 0, Srcdef);
- return false;
- } // endif Srcdef
-
- // Allocate the string used to contain the Query
- Query = new(g)STRING(g, 1023, "SELECT ");
-
- if (!cnt) {
- if (Columns) {
- // Normal SQL statement to retrieve results
- for (colp = Columns; colp; colp = colp->GetNext())
- if (!colp->IsSpecial()) {
- if (!first)
- oom |= Query->Append(", ");
- else
- first = false;
-
- // Column name can be encoded in UTF-8
- Decode(colp->GetName(), buf, sizeof(buf));
-
- if (Quote) {
- // Put column name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
- } else
- oom |= Query->Append(buf);
-
- ((PJDBCCOL)colp)->Rank = ++Ncol;
- } // endif colp
-
- } else
- // !Columns can occur for queries such that sql count(*) from...
- // for which we will count the rows from sql * from...
- oom |= Query->Append('*');
-
- } else
- // SQL statement used to retrieve the size of the result
- oom |= Query->Append("count(*)");
-
- oom |= Query->Append(" FROM ");
-
- if (Catalog && *Catalog)
- catp = Catalog;
-
- //if (tablep->GetSchema())
- // schmp = (char*)tablep->GetSchema();
- //else
- if (Schema && *Schema)
- schmp = Schema;
-
- if (catp) {
- oom |= Query->Append(catp);
-
- if (schmp) {
- oom |= Query->Append('.');
- oom |= Query->Append(schmp);
- } // endif schmp
-
- oom |= Query->Append('.');
- } else if (schmp) {
- oom |= Query->Append(schmp);
- oom |= Query->Append('.');
- } // endif schmp
-
- // Table name can be encoded in UTF-8
- Decode(TableName, buf, sizeof(buf));
-
- if (Quote) {
- // Put table name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
- } else
- oom |= Query->Append(buf);
-
- len = Query->GetLength();
-
- if (To_CondFil) {
- if (Mode == MODE_READ) {
- oom |= Query->Append(" WHERE ");
- oom |= Query->Append(To_CondFil->Body);
- len = Query->GetLength() + 1;
- } else
- len += (strlen(To_CondFil->Body) + 256);
-
- } else
- len += ((Mode == MODE_READX) ? 256 : 1);
-
- if (oom || Query->Resize(len)) {
- strcpy(g->Message, "MakeSQL: Out of memory");
- return true;
- } // endif oom
-
- if (trace)
- htrc("Query=%s\n", Query->GetStr());
-
- return false;
-} // end of MakeSQL
-
/***********************************************************************/
/* MakeInsert: make the Insert statement used with JDBC connection. */
/***********************************************************************/
@@ -601,7 +421,7 @@ bool TDBJDBC::MakeInsert(PGLOBAL g)
// Column name can be encoded in UTF-8
Decode(colp->GetName(), buf, sizeof(buf));
len += (strlen(buf) + 6); // comma + quotes + valist
- ((PJDBCCOL)colp)->Rank = ++Nparm;
+ ((PEXTCOL)colp)->SetRank(++Nparm);
} // endif colp
// Below 32 is enough to contain the fixed part of the query
@@ -711,76 +531,6 @@ bool TDBJDBC::SetParameters(PGLOBAL g)
} // end of SetParameters
/***********************************************************************/
-/* MakeCommand: make the Update or Delete statement to send to the */
-/* MySQL server. Limited to remote values and filtering. */
-/***********************************************************************/
-bool TDBJDBC::MakeCommand(PGLOBAL g)
-{
- char *p, *stmt, name[68], *body = NULL, *qc = Jcp->GetQuoteChar();
- char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1);
- bool qtd = Quoted > 0;
- int i = 0, k = 0;
-
- // Make a lower case copy of the originale query and change
- // back ticks to the data source identifier quoting character
- do {
- qrystr[i] = (Qrystr[i] == '`') ? *qc : tolower(Qrystr[i]);
- } while (Qrystr[i++]);
-
- if (To_CondFil && (p = strstr(qrystr, " where "))) {
- p[7] = 0; // Remove where clause
- Qrystr[(p - qrystr) + 7] = 0;
- body = To_CondFil->Body;
- stmt = (char*)PlugSubAlloc(g, NULL, strlen(qrystr)
- + strlen(body) + 64);
- } else
- stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64);
-
- // Check whether the table name is equal to a keyword
- // If so, it must be quoted in the original query
- strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
-
- if (strstr(" update delete low_priority ignore quick from ", name)) {
- strlwr(strcat(strcat(strcpy(name, qc), Name), qc));
- k += 2;
- } else
- strlwr(strcpy(name, Name)); // Not a keyword
-
- if ((p = strstr(qrystr, name))) {
- for (i = 0; i < p - qrystr; i++)
- stmt[i] = (Qrystr[i] == '`') ? *qc : Qrystr[i];
-
- stmt[i] = 0;
- k += i + (int)strlen(Name);
-
- if (qtd && *(p-1) == ' ')
- strcat(strcat(strcat(stmt, qc), TableName), qc);
- else
- strcat(stmt, TableName);
-
- i = (int)strlen(stmt);
-
- do {
- stmt[i++] = (Qrystr[k] == '`') ? *qc : Qrystr[k];
- } while (Qrystr[k++]);
-
- if (body)
- strcat(stmt, body);
-
- } else {
- sprintf(g->Message, "Cannot use this %s command",
- (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
- return NULL;
- } // endif p
-
- if (trace)
- htrc("Command=%s\n", stmt);
-
- Query = new(g)STRING(g, 0, stmt);
- return (!Query->GetSize());
-} // end of MakeCommand
-
-/***********************************************************************/
/* ResetSize: call by TDBMUL when calculating size estimate. */
/***********************************************************************/
void TDBJDBC::ResetSize(void)
@@ -834,33 +584,6 @@ int TDBJDBC::Cardinality(PGLOBAL g)
} // end of Cardinality
/***********************************************************************/
-/* JDBC GetMaxSize: returns table size estimate in number of lines. */
-/***********************************************************************/
-int TDBJDBC::GetMaxSize(PGLOBAL g)
-{
- if (MaxSize < 0) {
- if (Mode == MODE_DELETE)
- // Return 0 in mode DELETE in case of delete all.
- MaxSize = 0;
- else if (!Cardinality(NULL))
- MaxSize = 10; // To make MySQL happy
- else if ((MaxSize = Cardinality(g)) < 0)
- MaxSize = 12; // So we can see an error occured
-
- } // endif MaxSize
-
- return MaxSize;
-} // end of GetMaxSize
-
-/***********************************************************************/
-/* Return max size value. */
-/***********************************************************************/
-int TDBJDBC::GetProgMax(PGLOBAL g)
-{
- return GetMaxSize(g);
-} // end of GetProgMax
-
-/***********************************************************************/
/* JDBC Access Method opening routine. */
/* New method now that this routine is called recursively (last table */
/* first in reverse order): index blocks are immediately linked to */
@@ -997,6 +720,7 @@ bool TDBJDBC::OpenDB(PGLOBAL g)
return false;
} // end of OpenDB
+#if 0
/***********************************************************************/
/* GetRecpos: return the position of last read record. */
/***********************************************************************/
@@ -1004,6 +728,7 @@ int TDBJDBC::GetRecpos(void)
{
return Fpos;
} // end of GetRecpos
+#endif // 0
/***********************************************************************/
/* SetRecpos: set the position of next read record. */
@@ -1105,8 +830,7 @@ int TDBJDBC::ReadDB(PGLOBAL g)
int rc;
if (trace > 1)
- htrc("JDBC ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n",
- GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex);
+ htrc("JDBC ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode);
if (Mode == MODE_UPDATE || Mode == MODE_DELETE) {
if (!Query && MakeCommand(g))
@@ -1125,12 +849,6 @@ int TDBJDBC::ReadDB(PGLOBAL g)
} // endif Mode
- if (To_Kindex) {
- // Direct access of JDBC tables is not implemented
- strcpy(g->Message, "No JDBC direct access");
- return RC_FX;
- } // endif To_Kindex
-
/*********************************************************************/
/* Now start the reading process. */
/* Here is the place to fetch the line(s). */
@@ -1302,70 +1020,26 @@ void TDBJDBC::CloseDB(PGLOBAL g)
/* JDBCCOL public constructor. */
/***********************************************************************/
JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
- : COLBLK(cdp, tdbp, i)
+ : EXTCOL(cdp, tdbp, cprec, i, am)
{
- if (cprec) {
- Next = cprec->GetNext();
- cprec->SetNext(this);
- } else {
- Next = tdbp->GetColumns();
- tdbp->SetColumns(this);
- } // endif cprec
-
- // Set additional JDBC access method information for column.
- Crp = NULL;
- //Long = cdp->GetLong();
- Long = Precision;
- //strcpy(F_Date, cdp->F_Date);
- To_Val = NULL;
-//Slen = 0;
-//StrLen = &Slen;
-//Sqlbuf = NULL;
- Bufp = NULL;
- Blkp = NULL;
- Rank = 0; // Not known yet
-
- if (trace)
- htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this);
-
} // end of JDBCCOL constructor
/***********************************************************************/
/* JDBCCOL private constructor. */
/***********************************************************************/
-JDBCCOL::JDBCCOL(void) : COLBLK()
+JDBCCOL::JDBCCOL(void) : EXTCOL()
{
- Crp = NULL;
- Buf_Type = TYPE_INT; // This is a count(*) column
- // Set additional Dos access method information for column.
- Long = sizeof(int);
- To_Val = NULL;
-//Slen = 0;
-//StrLen = &Slen;
-//Sqlbuf = NULL;
- Bufp = NULL;
- Blkp = NULL;
- Rank = 1;
} // end of JDBCCOL constructor
/***********************************************************************/
/* JDBCCOL constructor used for copying columns. */
/* tdbp is the pointer to the new table descriptor. */
/***********************************************************************/
-JDBCCOL::JDBCCOL(JDBCCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp)
+JDBCCOL::JDBCCOL(JDBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp)
{
- Crp = col1->Crp;
- Long = col1->Long;
- //strcpy(F_Date, col1->F_Date);
- To_Val = col1->To_Val;
-//Slen = col1->Slen;
-//StrLen = col1->StrLen;
-//Sqlbuf = col1->Sqlbuf;
- Bufp = col1->Bufp;
- Blkp = col1->Blkp;
- Rank = col1->Rank;
} // end of JDBCCOL copy constructor
+#if 0
/***********************************************************************/
/* SetBuffer: prepare a column block for write operation. */
/***********************************************************************/
@@ -1411,6 +1085,7 @@ bool JDBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
Status = (ok) ? BUF_EMPTY : BUF_NO;
return false;
} // end of SetBuffer
+#endif // 0
/***********************************************************************/
/* ReadColumn: when SQLFetch is used there is nothing to do as the */
@@ -1456,72 +1131,8 @@ void JDBCCOL::ReadColumn(PGLOBAL g)
} // end of ReadColumn
-#if 0
/***********************************************************************/
-/* AllocateBuffers: allocate the extended buffer for SQLExtendedFetch */
-/* or Fetch. Note: we use Long+1 here because JDBC must have space */
-/* for the ending null character. */
-/***********************************************************************/
-void JDBCCOL::AllocateBuffers(PGLOBAL g, int rows)
-{
- if (Buf_Type == TYPE_DATE)
- Sqlbuf = (TIMESTAMP_STRUCT*)PlugSubAlloc(g, NULL,
- sizeof(TIMESTAMP_STRUCT));
-
- if (!rows)
- return;
-
- if (Buf_Type == TYPE_DATE)
- Bufp = PlugSubAlloc(g, NULL, rows * sizeof(TIMESTAMP_STRUCT));
- else {
- Blkp = AllocValBlock(g, NULL, Buf_Type, rows, GetBuflen(),
- GetScale(), true, false, false);
- Bufp = Blkp->GetValPointer();
- } // endelse
-
- if (rows > 1)
- StrLen = (SQLLEN *)PlugSubAlloc(g, NULL, rows * sizeof(SQLLEN));
-
-} // end of AllocateBuffers
-
-/***********************************************************************/
-/* Returns the buffer to use for Fetch or Extended Fetch. */
-/***********************************************************************/
-void *JDBCCOL::GetBuffer(DWORD rows)
-{
- if (rows && To_Tdb) {
- assert(rows == (DWORD)((TDBJDBC*)To_Tdb)->Rows);
- return Bufp;
- } else
- return (Buf_Type == TYPE_DATE) ? Sqlbuf : Value->GetTo_Val();
-
-} // end of GetBuffer
-
-/***********************************************************************/
-/* Returns the buffer length to use for Fetch or Extended Fetch. */
-/***********************************************************************/
-SWORD JDBCCOL::GetBuflen(void)
-{
- SWORD flen;
-
- switch (Buf_Type) {
- case TYPE_DATE:
- flen = (SWORD)sizeof(TIMESTAMP_STRUCT);
- break;
- case TYPE_STRING:
- case TYPE_DECIM:
- flen = (SWORD)Value->GetClen() + 1;
- break;
- default:
- flen = (SWORD)Value->GetClen();
- } // endswitch Buf_Type
-
- return flen;
-} // end of GetBuflen
-#endif // 0
-
-/***********************************************************************/
-/* WriteColumn: make sure the bind buffer is updated. */
+/* WriteColumn: Convert if necessary. */
/***********************************************************************/
void JDBCCOL::WriteColumn(PGLOBAL g)
{
@@ -1531,30 +1142,6 @@ void JDBCCOL::WriteColumn(PGLOBAL g)
if (Value != To_Val)
Value->SetValue_pval(To_Val, FALSE); // Convert the inserted value
-#if 0
- if (Buf_Type == TYPE_DATE) {
- struct tm tm, *dbtime = ((DTVAL*)Value)->GetGmTime(&tm);
-
- Sqlbuf->second = dbtime->tm_sec;
- Sqlbuf->minute = dbtime->tm_min;
- Sqlbuf->hour = dbtime->tm_hour;
- Sqlbuf->day = dbtime->tm_mday;
- Sqlbuf->month = dbtime->tm_mon + 1;
- Sqlbuf->year = dbtime->tm_year + 1900;
- Sqlbuf->fraction = 0;
- } else if (Buf_Type == TYPE_DECIM) {
- // Some data sources require local decimal separator
- char *p, sep = ((PTDBJDBC)To_Tdb)->Sep;
-
- if (sep && (p = strchr(Value->GetCharValue(), '.')))
- *p = sep;
-
- } // endif Buf_Type
-
- if (Nullable)
- *StrLen = (Value->IsNull()) ? SQL_NULL_DATA :
- (IsTypeChar(Buf_Type)) ? SQL_NTS : 0;
-#endif // 0
} // end of WriteColumn
/* -------------------------- Class TDBXJDC -------------------------- */
@@ -1795,7 +1382,7 @@ TDBJTB::TDBJTB(PJDBCDEF tdp) : TDBJDRV(tdp)
{
Schema = tdp->Tabschema;
Tab = tdp->Tabname;
- Tabtype = tdp->Tabtype;
+ Tabtype = tdp->Tabtyp;
Ops.Driver = tdp->Driver;
Ops.Url = tdp->Url;
Ops.User = tdp->Username;
diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h
index fee8223abaf..46d2073e923 100644
--- a/storage/connect/tabjdbc.h
+++ b/storage/connect/tabjdbc.h
@@ -21,7 +21,7 @@ typedef class JSRCCOL *PJSRCCOL;
/***********************************************************************/
/* JDBC table. */
/***********************************************************************/
-class DllExport JDBCDEF : public TABDEF { /* Logical table description */
+class DllExport JDBCDEF : public EXTDEF { /* Logical table description */
friend class TDBJDBC;
friend class TDBXJDC;
friend class TDBJDRV;
@@ -33,17 +33,8 @@ public:
// Implementation
virtual const char *GetType(void) { return "JDBC"; }
- PSZ GetTabname(void) { return Tabname; }
- PSZ GetTabschema(void) { return Tabschema; }
- PSZ GetTabcat(void) { return Tabcat; }
- PSZ GetSrcdef(void) { return Srcdef; }
- char GetSep(void) { return (Sep) ? *Sep : 0; }
- int GetQuoted(void) { return Quoted; }
-//int GetCatver(void) { return Catver; }
- int GetOptions(void) { return Options; }
// Methods
- virtual int Indexable(void) { return 2; }
virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
virtual PTDB GetTable(PGLOBAL g, MODE m);
int ParseURL(PGLOBAL g, char *url, bool b = true);
@@ -53,28 +44,7 @@ protected:
// Members
PSZ Driver; /* JDBC driver */
PSZ Url; /* JDBC driver URL */
- PSZ Tabname; /* External table name */
PSZ Wrapname; /* Java wrapper name */
- PSZ Tabschema; /* External table schema */
- PSZ Username; /* User connect name */
- PSZ Password; /* Password connect info */
-//PSZ Prop; /* Connection Properties */
- PSZ Tabcat; /* External table catalog */
- PSZ Tabtype; /* External table type */
- PSZ Colpat; /* Catalog column pattern */
- PSZ Srcdef; /* The source table SQL definition */
- PSZ Qchar; /* Identifier quoting character */
- PSZ Qrystr; /* The original query */
- PSZ Sep; /* Decimal separator */
- int Options; /* Open connection options */
-//int Cto; /* Open connection timeout */
-//int Qto; /* Query (command) timeout */
- int Quoted; /* Identifier quoting level */
- int Maxerr; /* Maxerr for an Exec table */
- int Maxres; /* Maxres for a catalog table */
- int Memory; /* Put result set in memory */
- bool Scrollable; /* Use scrollable cursor */
- bool Xsrc; /* Execution type */
}; // end of JDBCDEF
#if !defined(NJDBC)
@@ -84,34 +54,34 @@ protected:
/* This is the JDBC Access Method class declaration for files from */
/* other DB drivers to be accessed via JDBC. */
/***********************************************************************/
-class TDBJDBC : public TDBASE {
+class TDBJDBC : public TDBEXT {
friend class JDBCCOL;
friend class JDBConn;
public:
// Constructor
TDBJDBC(PJDBCDEF tdp = NULL);
- TDBJDBC(PTDBJDBC tdbp);
+ TDBJDBC(PTDBJDBC tdbp);
// Implementation
- virtual AMT GetAmType(void) { return TYPE_AM_JDBC; }
- virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g)TDBJDBC(this); }
+ virtual AMT GetAmType(void) {return TYPE_AM_JDBC;}
+ virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBJDBC(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
- virtual int GetRecpos(void);
+ virtual PTDB Clone(PTABS t);
+//virtual int GetRecpos(void);
virtual bool SetRecpos(PGLOBAL g, int recpos);
//virtual PSZ GetFile(PGLOBAL g);
//virtual void SetFile(PGLOBAL g, PSZ fn);
virtual void ResetSize(void);
- //virtual int GetAffectedRows(void) {return AftRows;}
+//virtual int GetAffectedRows(void) {return AftRows;}
virtual PSZ GetServer(void) { return "JDBC"; }
virtual int Indexable(void) { return 2; }
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual int Cardinality(PGLOBAL g);
- virtual int GetMaxSize(PGLOBAL g);
- virtual int GetProgMax(PGLOBAL g);
+//virtual int GetMaxSize(PGLOBAL g);
+//virtual int GetProgMax(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -121,97 +91,50 @@ public:
protected:
// Internal functions
- int Decode(char *utf, char *buf, size_t n);
- bool MakeSQL(PGLOBAL g, bool cnt);
+//int Decode(char *utf, char *buf, size_t n);
+//bool MakeSQL(PGLOBAL g, bool cnt);
bool MakeInsert(PGLOBAL g);
- bool MakeCommand(PGLOBAL g);
- //bool MakeFilter(PGLOBAL g, bool c);
+//virtual bool MakeCommand(PGLOBAL g);
+//bool MakeFilter(PGLOBAL g, bool c);
bool SetParameters(PGLOBAL g);
- //char *MakeUpdate(PGLOBAL g);
- //char *MakeDelete(PGLOBAL g);
+//char *MakeUpdate(PGLOBAL g);
+//char *MakeDelete(PGLOBAL g);
// Members
JDBConn *Jcp; // Points to a JDBC connection class
JDBCCOL *Cnp; // Points to count(*) column
JDBCPARM Ops; // Additional parameters
- PSTRG Query; // Constructed SQL query
char *WrapName; // Points to Java wrapper name
- char *TableName; // Points to JDBC table name
- char *Schema; // Points to JDBC table Schema
- char *User; // User connect info
- char *Pwd; // Password connect info
- char *Catalog; // Points to JDBC table Catalog
- char *Srcdef; // The source table SQL definition
- char *Count; // Points to count(*) SQL statement
-//char *Where; // Points to local where clause
- char *Quote; // The identifier quoting character
- char *MulConn; // Used for multiple JDBC tables
- char *DBQ; // The address part of Connect string
- char *Qrystr; // The original query
- char Sep; // The decimal separator
- int Options; // Connect options
-//int Cto; // Connect timeout
-//int Qto; // Query timeout
- int Quoted; // The identifier quoting level
- int Fpos; // Position of last read record
- int Curpos; // Cursor position of last fetch
- int AftRows; // The number of affected rows
- int Rows; // Rowset size
- int CurNum; // Current buffer line number
- int Rbuf; // Number of lines read in buffer
- int BufSize; // Size of connect string buffer
- int Ncol; // The column number
- int Nparm; // The number of statement parameters
- int Memory; // 0: No 1: Alloc 2: Put 3: Get
-//bool Scrollable; // Use scrollable cursor --> in Ops
- bool Placed; // True for position reading
+//int Ncol; // The column number
bool Prepared; // True when using prepared statement
bool Werr; // Write error
bool Rerr; // Rewind error
- PQRYRES Qrp; // Points to storage result
}; // end of class TDBJDBC
/***********************************************************************/
/* Class JDBCCOL: JDBC access method column descriptor. */
/* This A.M. is used for JDBC tables. */
/***********************************************************************/
-class JDBCCOL : public COLBLK {
+class JDBCCOL : public EXTCOL {
friend class TDBJDBC;
public:
// Constructors
JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC");
- JDBCCOL(JDBCCOL *colp, PTDB tdbp); // Constructor used in copy process
+ JDBCCOL(JDBCCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
- virtual int GetAmType(void) { return TYPE_AM_JDBC; }
-//SQLLEN *GetStrLen(void) { return StrLen; }
- int GetRank(void) { return Rank; }
-//PVBLK GetBlkp(void) {return Blkp;}
- void SetCrp(PCOLRES crp) { Crp = crp; }
+ virtual int GetAmType(void) { return TYPE_AM_JDBC; }
// Methods
- virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
- virtual void ReadColumn(PGLOBAL g);
- virtual void WriteColumn(PGLOBAL g);
-//void AllocateBuffers(PGLOBAL g, int rows);
-//void *GetBuffer(DWORD rows);
-//SWORD GetBuflen(void);
- // void Print(PGLOBAL g, FILE *, uint);
+//virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
protected:
- // Constructor used by GetMaxSize
- JDBCCOL(void);
+ // Constructor for count(*) column
+ JDBCCOL(void);
// Members
- //TIMESTAMP_STRUCT *Sqlbuf; // To get SQL_TIMESTAMP's
- PCOLRES Crp; // To storage result
- void *Bufp; // To extended buffer
- PVBLK Blkp; // To Value Block
- //char F_Date[12]; // Internal Date format
- PVAL To_Val; // To value used for Insert
-//SQLLEN *StrLen; // As returned by JDBC
-//SQLLEN Slen; // Used with Fetch
- int Rank; // Rank (position) number in the query
}; // end of class JDBCCOL
/***********************************************************************/
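The members struck out of TDBJDBC and JDBCCOL above (TableName, Schema, User, Pwd, Srcdef, Query, Quoted, the column Rank, and friends) are not simply deleted: the classes now derive from TDBEXT and EXTCOL, so the shared state moves into the new tabext.h header that other files in this patch start to include. What follows is only a rough sketch of those bases, inferred from the removals in this hunk; the authoritative declarations are in tabext.h itself, which is not reproduced here.

// Sketch only -- reconstructed from the members removed above, not copied from
// tabext.h. Assumes the usual CONNECT headers (global.h, plgdbsem.h, xtable.h,
// colblk.h) for PGLOBAL, PSTRG, PVAL, TDB and COLBLK.
class TDBEXT : public TDB {              // assumed parent; the exact base is in tabext.h
 protected:
  PSTRG Query;                           // Constructed SQL query
  char *TableName, *Schema, *Catalog;    // Remote object identification
  char *User, *Pwd, *Srcdef, *Qrystr;    // Connection info and source definition
  int   Quoted, Fpos, CurNum, Rbuf;      // Bookkeeping shared by JDBC/ODBC/MySQL
  virtual bool MakeCommand(PGLOBAL g);   // Presumably a base-class service (see tabmysql.h below)
}; // end of TDBEXT sketch

class EXTCOL : public COLBLK {           // assumed parent of JDBCCOL and ODBCCOL
 public:
  void SetRank(int n) {Rank = n;}        // Used by MakeSQL/MakeInsert in tabodbc.cpp below
 protected:
  int  Rank;                             // Rank (position) number in the query
  PVAL To_Val;                           // To value used for Insert
}; // end of EXTCOL sketch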
@@ -268,7 +191,7 @@ public:
JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC");
// Implementation
- //virtual int GetAmType(void) {return TYPE_AM_JDBC;}
+ virtual int GetAmType(void) {return TYPE_AM_JDBC;}
// Methods
virtual void ReadColumn(PGLOBAL g);
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index 1b9ce8b64c9..1e11d454cfc 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -129,7 +129,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info)
if (tdp->Pretty == 2) {
if (tdp->Zipped) {
#if defined(ZIP_SUPPORT)
- tjsp = new(g) TDBJSON(tdp, new(g) ZIPFAM(tdp));
+ tjsp = new(g) TDBJSON(tdp, new(g) UNZFAM(tdp));
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
@@ -151,7 +151,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info)
if (tdp->Zipped) {
#if defined(ZIP_SUPPORT)
- tjnp = new(g)TDBJSN(tdp, new(g)ZIPFAM(tdp));
+ tjnp = new(g)TDBJSN(tdp, new(g)UNZFAM(tdp));
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
@@ -441,7 +441,14 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
if (Zipped) {
#if defined(ZIP_SUPPORT)
- txfp = new(g) ZIPFAM(this);
+ if (m == MODE_READ || m == MODE_UPDATE) {
+ txfp = new(g) UNZFAM(this);
+ } else if (m == MODE_INSERT) {
+ txfp = new(g) ZIPFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's m
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
@@ -479,7 +486,15 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
} else {
if (Zipped) {
#if defined(ZIP_SUPPORT)
- txfp = new(g)ZIPFAM(this);
+ if (m == MODE_READ || m == MODE_UPDATE) {
+ txfp = new(g) UNZFAM(this);
+ } else if (m == MODE_INSERT) {
+ strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0");
+ return NULL;
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's m
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
@@ -559,7 +574,7 @@ TDBJSN::TDBJSN(TDBJSN *tdbp) : TDBDOS(NULL, tdbp)
} // end of TDBJSN copy constructor
// Used for update
-PTDB TDBJSN::CopyOne(PTABS t)
+PTDB TDBJSN::Clone(PTABS t)
{
G = NULL;
PTDB tp;
@@ -574,7 +589,7 @@ PTDB TDBJSN::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate JSN column description block. */
@@ -1563,7 +1578,7 @@ TDBJSON::TDBJSON(PJTDB tdbp) : TDBJSN(tdbp)
} // end of TDBJSON copy constructor
// Used for update
-PTDB TDBJSON::CopyOne(PTABS t)
+PTDB TDBJSON::Clone(PTABS t)
{
PTDB tp;
PJCOL cp1, cp2;
@@ -1577,7 +1592,7 @@ PTDB TDBJSON::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Make the document tree from the object path. */
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
index c9d30d48f2a..924ce387900 100644
--- a/storage/connect/tabjson.h
+++ b/storage/connect/tabjson.h
@@ -82,7 +82,7 @@ public:
void SetG(PGLOBAL g) {G = g;}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual PCOL InsertSpecialColumn(PCOL colp);
virtual int RowNumber(PGLOBAL g, bool b = FALSE)
@@ -188,7 +188,7 @@ class TDBJSON : public TDBJSN {
PJAR GetDoc(void) {return Doc;}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
// Database routines
virtual int Cardinality(PGLOBAL g);
diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp
index c21bb1660ea..916449be6c6 100644
--- a/storage/connect/table.cpp
+++ b/storage/connect/table.cpp
@@ -1,7 +1,7 @@
/************** Table C++ Functions Source Code File (.CPP) ************/
-/* Name: TABLE.CPP Version 2.7 */
+/* Name: TABLE.CPP Version 2.8 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1999-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 1999-2017 */
/* */
/* This file contains the TBX, TDB and OPJOIN classes functions. */
/***********************************************************************/
@@ -10,6 +10,7 @@
/* Include relevant MariaDB header file. */
/***********************************************************************/
#include "my_global.h"
+#include "sql_string.h"
/***********************************************************************/
/* Include required application header files */
@@ -40,8 +41,9 @@ void AddPointer(PTABS, void *);
/* TDB public constructors. */
/***********************************************************************/
TDB::TDB(PTABDEF tdp) : Tdb_No(++Tnum)
- {
- Use = USE_NO;
+{
+ To_Def = tdp;
+ Use = USE_NO;
To_Orig = NULL;
To_Filter = NULL;
To_CondFil = NULL;
@@ -49,14 +51,20 @@ TDB::TDB(PTABDEF tdp) : Tdb_No(++Tnum)
Name = (tdp) ? tdp->GetName() : NULL;
To_Table = NULL;
Columns = NULL;
- Degree = (tdp) ? tdp->GetDegree() : 0;
+ To_SetCols = NULL;
+ Degree = (tdp) ? tdp->GetDegree() : 0;
Mode = MODE_ANY;
Cardinal = -1;
- } // end of TDB standard constructor
+ MaxSize = -1;
+ Read_Only = (tdp) ? tdp->IsReadOnly() : false;
+ m_data_charset = (tdp) ? tdp->data_charset() : NULL;
+ csname = (tdp) ? tdp->csname : NULL;
+} // end of TDB standard constructor
TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum)
- {
- Use = tdbp->Use;
+{
+ To_Def = tdbp->To_Def;
+ Use = tdbp->Use;
To_Orig = tdbp;
To_Filter = NULL;
To_CondFil = NULL;
@@ -64,12 +72,192 @@ TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum)
Name = tdbp->Name;
To_Table = tdbp->To_Table;
Columns = NULL;
- Degree = tdbp->Degree;
+ To_SetCols = tdbp->To_SetCols; // ???
+ Degree = tdbp->Degree;
Mode = tdbp->Mode;
Cardinal = tdbp->Cardinal;
- } // end of TDB copy constructor
+ MaxSize = tdbp->MaxSize;
+ Read_Only = tdbp->IsReadOnly();
+ m_data_charset = tdbp->data_charset();
+ csname = tdbp->csname;
+} // end of TDB copy constructor
// Methods
+/***********************************************************************/
+/* Return the pointer on the charset of this table. */
+/***********************************************************************/
+CHARSET_INFO *TDB::data_charset(void)
+{
+ // If no DATA_CHARSET is specified, we assume that the character
+ // set of the remote data is the same as the CHARACTER SET
+ // definition of the SQL column.
+ return m_data_charset ? m_data_charset : &my_charset_bin;
+} // end of data_charset
+
+/***********************************************************************/
+/* Return the datapath of the DB this table belongs to. */
+/***********************************************************************/
+PSZ TDB::GetPath(void)
+{
+ return To_Def->GetPath();
+} // end of GetPath
+
+/***********************************************************************/
+/* Return true if name is a special column of this table. */
+/***********************************************************************/
+bool TDB::IsSpecial(PSZ name)
+{
+ for (PCOLDEF cdp = To_Def->GetCols(); cdp; cdp = cdp->GetNext())
+ if (!stricmp(cdp->GetName(), name) && (cdp->Flags & U_SPECIAL))
+ return true; // Special column to ignore while inserting
+
+ return false; // Not found or not special or not inserting
+} // end of IsSpecial
+
+/***********************************************************************/
+/* Initialize TDB based column description block construction. */
+/* name is used to call columns by name. */
+/* num is used by TBL to construct columns by index number. */
+/* Note: name=Null and num=0 for constructing all columns (select *) */
+/***********************************************************************/
+PCOL TDB::ColDB(PGLOBAL g, PSZ name, int num)
+{
+ int i;
+ PCOLDEF cdp;
+ PCOL cp, colp = NULL, cprec = NULL;
+
+ if (trace)
+ htrc("ColDB: am=%d colname=%s tabname=%s num=%d\n",
+ GetAmType(), SVP(name), Name, num);
+
+ for (cdp = To_Def->GetCols(), i = 1; cdp; cdp = cdp->GetNext(), i++)
+ if ((!name && !num) ||
+ (name && !stricmp(cdp->GetName(), name)) || num == i) {
+ /*****************************************************************/
+ /* Check for existence of desired column. */
+ /* Also find where to insert the new block. */
+ /*****************************************************************/
+ for (cp = Columns; cp; cp = cp->GetNext())
+ if ((num && cp->GetIndex() == i) ||
+ (name && !stricmp(cp->GetName(), name)))
+ break; // Found
+ else if (cp->GetIndex() < i)
+ cprec = cp;
+
+ if (trace)
+ htrc("cdp(%d).Name=%s cp=%p\n", i, cdp->GetName(), cp);
+
+ /*****************************************************************/
+ /* Now take care of Column Description Block. */
+ /*****************************************************************/
+ if (cp)
+ colp = cp;
+ else if (!(cdp->Flags & U_SPECIAL))
+ colp = MakeCol(g, cdp, cprec, i);
+ else if (Mode != MODE_INSERT)
+ colp = InsertSpcBlk(g, cdp);
+
+ if (trace)
+ htrc("colp=%p\n", colp);
+
+ if (name || num)
+ break;
+ else if (colp && !colp->IsSpecial())
+ cprec = colp;
+
+ } // endif Name
+
+ return (colp);
+} // end of ColDB
+
+/***********************************************************************/
+/* InsertSpecialColumn: Put a special column ahead of the column list.*/
+/***********************************************************************/
+PCOL TDB::InsertSpecialColumn(PCOL colp)
+{
+ if (!colp->IsSpecial())
+ return NULL;
+
+ colp->SetNext(Columns);
+ Columns = colp;
+ return colp;
+} // end of InsertSpecialColumn
+
+/***********************************************************************/
+/* Make a special COLBLK to insert in a table. */
+/***********************************************************************/
+PCOL TDB::InsertSpcBlk(PGLOBAL g, PCOLDEF cdp)
+{
+ //char *name = cdp->GetName();
+ char *name = cdp->GetFmt();
+ PCOLUMN cp;
+ PCOL colp;
+
+ cp = new(g)COLUMN(cdp->GetName());
+
+ if (!To_Table) {
+ strcpy(g->Message, "Cannot make special column: To_Table is NULL");
+ return NULL;
+ } else
+ cp->SetTo_Table(To_Table);
+
+ if (!stricmp(name, "FILEID") || !stricmp(name, "FDISK") ||
+ !stricmp(name, "FPATH") || !stricmp(name, "FNAME") ||
+ !stricmp(name, "FTYPE") || !stricmp(name, "SERVID")) {
+ if (!To_Def || !(To_Def->GetPseudo() & 2)) {
+ sprintf(g->Message, MSG(BAD_SPEC_COLUMN));
+ return NULL;
+ } // endif Pseudo
+
+ if (!stricmp(name, "FILEID"))
+ colp = new(g)FIDBLK(cp, OP_XX);
+ else if (!stricmp(name, "FDISK"))
+ colp = new(g)FIDBLK(cp, OP_FDISK);
+ else if (!stricmp(name, "FPATH"))
+ colp = new(g)FIDBLK(cp, OP_FPATH);
+ else if (!stricmp(name, "FNAME"))
+ colp = new(g)FIDBLK(cp, OP_FNAME);
+ else if (!stricmp(name, "FTYPE"))
+ colp = new(g)FIDBLK(cp, OP_FTYPE);
+ else
+ colp = new(g)SIDBLK(cp);
+
+ } else if (!stricmp(name, "TABID")) {
+ colp = new(g)TIDBLK(cp);
+ } else if (!stricmp(name, "PARTID")) {
+ colp = new(g)PRTBLK(cp);
+ //} else if (!stricmp(name, "CONID")) {
+ // colp = new(g) CIDBLK(cp);
+ } else if (!stricmp(name, "ROWID")) {
+ colp = new(g)RIDBLK(cp, false);
+ } else if (!stricmp(name, "ROWNUM")) {
+ colp = new(g)RIDBLK(cp, true);
+ } else {
+ sprintf(g->Message, MSG(BAD_SPECIAL_COL), name);
+ return NULL;
+ } // endif's name
+
+ if (!(colp = InsertSpecialColumn(colp))) {
+ sprintf(g->Message, MSG(BAD_SPECIAL_COL), name);
+ return NULL;
+ } // endif Insert
+
+ return (colp);
+} // end of InsertSpcBlk
+
+/***********************************************************************/
+/* Marks DOS/MAP table columns used in internal joins. */
+/* tdb2 is the top of tree or first tdb in chained tdb's and tdbp */
+/* points to the currently marked tdb. */
+/* Two questions here: exact meaning of U_J_INT ? */
+/* Why is the eventual reference to To_Key_Col not marked U_J_EXT ? */
+/***********************************************************************/
+void TDB::MarkDB(PGLOBAL, PTDB tdb2)
+{
+ if (trace)
+ htrc("DOS MarkDB: tdbp=%p tdb2=%p\n", this, tdb2);
+
+} // end of MarkDB
/***********************************************************************/
/* RowNumber: returns the current row ordinal number. */
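Taken together, the block added above pulls the generic column machinery (ColDB, InsertSpcBlk, InsertSpecialColumn), the default SetRecpos and MarkDB, and members such as To_Def, MaxSize, Read_Only and the charset fields up from TDBASE into TDB. Read alongside the tabext.h-based classes elsewhere in this patch, the hierarchy appears to become the following (a reading of this diff only, not a copy of any header):

//  TDB         -- now owns To_Def, MaxSize, Read_Only, m_data_charset, csname,
//   |             ColDB(), InsertSpcBlk(), data_charset(), a default SetRecpos()
//   +-- TDBASE -- file-based tables; keeps the indexing members (To_Kindex, Knum, ...)
//   |     +-- TDBDOS, TDBMAC, TDBMUL, TDBDIR, ...
//   +-- TDBEXT -- assumed common base (tabext.h) for external data sources
//         +-- TDBJDBC, TDBODBC, TDBMYSQL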
@@ -86,7 +274,7 @@ PTDB TDB::Copy(PTABS t)
//PGLOBAL g = t->G; // Is this really useful ???
for (tdb1 = this; tdb1; tdb1 = tdb1->Next) {
- tp = tdb1->CopyOne(t);
+ tp = tdb1->Clone(t);
if (!outp)
outp = tp;
@@ -100,6 +288,15 @@ PTDB TDB::Copy(PTABS t)
return outp;
} // end of Copy
+/***********************************************************************/
+/* SetRecpos: Replace the table at the specified position. */
+/***********************************************************************/
+bool TDB::SetRecpos(PGLOBAL g, int)
+{
+ strcpy(g->Message, MSG(SETRECPOS_NIY));
+ return true;
+} // end of SetRecpos
+
void TDB::Print(PGLOBAL g, FILE *f, uint n)
{
PCOL cp;
@@ -135,34 +332,34 @@ void TDB::Print(PGLOBAL, char *ps, uint)
/***********************************************************************/
TDBASE::TDBASE(PTABDEF tdp) : TDB(tdp)
{
- To_Def = tdp;
+//To_Def = tdp;
To_Link = NULL;
To_Key_Col = NULL;
To_Kindex = NULL;
To_Xdp = NULL;
- To_SetCols = NULL;
+//To_SetCols = NULL;
Ftype = RECFM_NAF;
- MaxSize = -1;
+//MaxSize = -1;
Knum = 0;
- Read_Only = (tdp) ? tdp->IsReadOnly() : false;
- m_data_charset= (tdp) ? tdp->data_charset() : NULL;
- csname = (tdp) ? tdp->csname : NULL;
+//Read_Only = (tdp) ? tdp->IsReadOnly() : false;
+//m_data_charset= (tdp) ? tdp->data_charset() : NULL;
+//csname = (tdp) ? tdp->csname : NULL;
} // end of TDBASE constructor
TDBASE::TDBASE(PTDBASE tdbp) : TDB(tdbp)
{
- To_Def = tdbp->To_Def;
+//To_Def = tdbp->To_Def;
To_Link = tdbp->To_Link;
To_Key_Col = tdbp->To_Key_Col;
To_Kindex = tdbp->To_Kindex;
To_Xdp = tdbp->To_Xdp;
- To_SetCols = tdbp->To_SetCols; // ???
+//To_SetCols = tdbp->To_SetCols; // ???
Ftype = tdbp->Ftype;
- MaxSize = tdbp->MaxSize;
+//MaxSize = tdbp->MaxSize;
Knum = tdbp->Knum;
- Read_Only = tdbp->Read_Only;
- m_data_charset= tdbp->m_data_charset;
- csname = tdbp->csname;
+//Read_Only = tdbp->Read_Only;
+//m_data_charset= tdbp->m_data_charset;
+//csname = tdbp->csname;
} // end of TDBASE copy constructor
/***********************************************************************/
@@ -173,6 +370,7 @@ PCATLG TDBASE::GetCat(void)
return (To_Def) ? To_Def->GetCat() : NULL;
} // end of GetCat
+#if 0
/***********************************************************************/
/* Return the pointer on the charset of this table. */
/***********************************************************************/
@@ -334,6 +532,7 @@ PCOL TDBASE::InsertSpcBlk(PGLOBAL g, PCOLDEF cdp)
return (colp);
} // end of InsertSpcBlk
+#endif // 0
/***********************************************************************/
/* ResetTableOpt: Wrong for this table type. */
@@ -362,6 +561,7 @@ void TDBASE::ResetKindex(PGLOBAL g, PKXBASE kxp)
To_Kindex = kxp;
} // end of ResetKindex
+#if 0
/***********************************************************************/
/* SetRecpos: Replace the table at the specified position. */
/***********************************************************************/
@@ -370,6 +570,7 @@ bool TDBASE::SetRecpos(PGLOBAL g, int)
strcpy(g->Message, MSG(SETRECPOS_NIY));
return true;
} // end of SetRecpos
+#endif // 0
/***********************************************************************/
/* Methods */
@@ -379,6 +580,7 @@ void TDBASE::PrintAM(FILE *f, char *m)
fprintf(f, "%s AM(%d): mode=%d\n", m, GetAmType(), Mode);
} // end of PrintAM
+#if 0
/***********************************************************************/
/* Marks DOS/MAP table columns used in internal joins. */
/* tdb2 is the top of tree or first tdb in chained tdb's and tdbp */
@@ -392,6 +594,7 @@ void TDBASE::MarkDB(PGLOBAL, PTDB tdb2)
htrc("DOS MarkDB: tdbp=%p tdb2=%p\n", this, tdb2);
} // end of MarkDB
+#endif // 0
/* ---------------------------TDBCAT class --------------------------- */
diff --git a/storage/connect/tabmac.cpp b/storage/connect/tabmac.cpp
index e6e2abb54e2..bbaba591540 100644
--- a/storage/connect/tabmac.cpp
+++ b/storage/connect/tabmac.cpp
@@ -12,7 +12,7 @@
#include "global.h"
#include "plgdbsem.h"
//#include "catalog.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "xtable.h"
#include "colblk.h"
#include "tabmac.h"
diff --git a/storage/connect/tabmac.h b/storage/connect/tabmac.h
index f9a66e82eaa..47565bb2541 100644
--- a/storage/connect/tabmac.h
+++ b/storage/connect/tabmac.h
@@ -52,7 +52,7 @@ class TDBMAC : public TDBASE {
//virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBMAC(g, this);}
// Methods
-//virtual PTDB CopyOne(PTABS t);
+//virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void) {return N;}
virtual int RowNumber(PGLOBAL g, bool b = false) {return N;}
diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp
index ea287558b44..78adde81d12 100644
--- a/storage/connect/tabmul.cpp
+++ b/storage/connect/tabmul.cpp
@@ -1,11 +1,11 @@
/************* TabMul C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABMUL */
/* ------------- */
-/* Version 1.7 */
+/* Version 1.8 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to PlugDB Software Development 2003 - 2015 */
+/* (C) Copyright to PlugDB Software Development 2003 - 2017 */
/* Author: Olivier BERTRAND */
/* */
/* WHAT THIS PROGRAM DOES: */
@@ -73,7 +73,7 @@
/***********************************************************************/
/* TABMUL constructors. */
/***********************************************************************/
-TDBMUL::TDBMUL(PTDBASE tdbp) : TDBASE(tdbp->GetDef())
+TDBMUL::TDBMUL(PTDB tdbp) : TDBASE(tdbp->GetDef())
{
Tdbp = tdbp;
Filenames = NULL;
@@ -94,22 +94,22 @@ TDBMUL::TDBMUL(PTDBMUL tdbp) : TDBASE(tdbp)
} // end of TDBMUL copy constructor
// Method
-PTDB TDBMUL::CopyOne(PTABS t)
+PTDB TDBMUL::Clone(PTABS t)
{
PTDBMUL tp;
PGLOBAL g = t->G; // Is this really useful ???
tp = new(g) TDBMUL(this);
- tp->Tdbp = (PTDBASE)Tdbp->CopyOne(t);
+ tp->Tdbp = Tdbp->Clone(t);
tp->Columns = tp->Tdbp->GetColumns();
return tp;
- } // end of CopyOne
+ } // end of Clone
PTDB TDBMUL::Duplicate(PGLOBAL g)
{
PTDBMUL tmup = new(g) TDBMUL(this);
- tmup->Tdbp = (PTDBASE)Tdbp->Duplicate(g);
+ tmup->Tdbp = Tdbp->Duplicate(g);
return tmup;
} // end of Duplicate
@@ -658,7 +658,7 @@ TDBDIR::TDBDIR(PTDBDIR tdbp) : TDBASE(tdbp)
} // end of TDBDIR copy constructor
// Method
-PTDB TDBDIR::CopyOne(PTABS t)
+PTDB TDBDIR::Clone(PTABS t)
{
PTDB tp;
PGLOBAL g = t->G; // Is this really useful ???
@@ -666,7 +666,7 @@ PTDB TDBDIR::CopyOne(PTABS t)
tp = new(g) TDBDIR(this);
tp->SetColumns(Columns);
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Initialize/get the components of the search file pattern. */
@@ -974,7 +974,7 @@ TDBSDR::TDBSDR(PTDBSDR tdbp) : TDBDIR(tdbp)
} // end of TDBSDR copy constructor
// Method
-PTDB TDBSDR::CopyOne(PTABS t)
+PTDB TDBSDR::Clone(PTABS t)
{
PTDB tp;
PGLOBAL g = t->G; // Is this really useful ???
@@ -982,7 +982,7 @@ PTDB TDBSDR::CopyOne(PTABS t)
tp = new(g) TDBSDR(this);
tp->SetColumns(Columns);
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* SDR GetMaxSize: returns the number of retrieved files. */
@@ -1251,7 +1251,7 @@ TDBDHR::TDBDHR(PTDBDHR tdbp) : TDBASE(tdbp)
} // end of TDBDHR copy constructor
// Method
-PTDB TDBDHR::CopyOne(PTABS t)
+PTDB TDBDHR::Clone(PTABS t)
{
PTDB tp;
PGLOBAL g = t->G; // Is this really useful ???
@@ -1259,7 +1259,7 @@ PTDB TDBDHR::CopyOne(PTABS t)
tp = new(g) TDBDHR(this);
tp->Columns = Columns;
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate DHR column description block. */
diff --git a/storage/connect/tabmul.h b/storage/connect/tabmul.h
index 433cc3a2ee3..51fa7f9000a 100644
--- a/storage/connect/tabmul.h
+++ b/storage/connect/tabmul.h
@@ -1,7 +1,7 @@
/*************** Tabmul H Declares Source Code File (.H) ***************/
-/* Name: TABMUL.H Version 1.4 */
+/* Name: TABMUL.H Version 1.5 */
/* */
-/* (C) Copyright to PlugDB Software Development 2003-2012 */
+/* (C) Copyright to PlugDB Software Development 2003-2017 */
/* Author: Olivier BERTRAND */
/* */
/* This file contains the TDBMUL and TDBDIR classes declares. */
@@ -28,7 +28,7 @@ class DllExport TDBMUL : public TDBASE {
//friend class MULCOL;
public:
// Constructor
- TDBMUL(PTDBASE tdbp);
+ TDBMUL(PTDB tdbp);
TDBMUL(PTDBMUL tdbp);
// Implementation
@@ -37,7 +37,7 @@ class DllExport TDBMUL : public TDBASE {
// Methods
virtual void ResetDB(void);
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual bool IsSame(PTDB tp) {return tp == (PTDB)Tdbp;}
virtual PSZ GetFile(PGLOBAL g) {return Tdbp->GetFile(g);}
virtual int GetRecpos(void) {return 0;}
@@ -61,7 +61,7 @@ class DllExport TDBMUL : public TDBASE {
protected:
// Members
- TDBASE *Tdbp; // Points to a (file) table class
+ PTDB Tdbp; // Points to a (file) table class
char* *Filenames; // Points to file names
int Rows; // Total rows of already read files
int Mul; // Type of multiple file list
@@ -112,7 +112,7 @@ class TDBDIR : public TDBASE {
{return (PTDB)new(g) TDBDIR(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void) {return iFile;}
// Database routines
@@ -134,7 +134,7 @@ class TDBDIR : public TDBASE {
int iFile; // Index of currently retrieved file
#if defined(__WIN__)
_finddata_t FileData; // Find data structure
- int Hsearch; // Search handle
+ intptr_t Hsearch; // Search handle
char Drive[_MAX_DRIVE]; // Drive name
#else // !__WIN__
struct stat Fileinfo; // File info structure
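The Hsearch and H members above widen from int to intptr_t because the Microsoft CRT directory-scan functions traffic in pointer-sized search handles; keeping the handle in an int truncates it on 64-bit Windows builds. A minimal standalone illustration of that CRT pattern (Windows-only, not CONNECT code):

#include <io.h>        // _findfirst, _findnext, _findclose, _finddata_t
#include <cstdint>     // intptr_t
#include <cstdio>

void ListDir(const char *pattern)
{
  struct _finddata_t fd;
  intptr_t h = _findfirst(pattern, &fd);   // pointer-sized search handle

  if (h == -1)
    return;                                // no match or error

  do {
    printf("%s\n", fd.name);               // one line per matching file
  } while (_findnext(h, &fd) == 0);

  _findclose(h);                           // release the search handle
} // end of ListDir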
@@ -168,7 +168,7 @@ class TDBSDR : public TDBDIR {
{return (PTDB)new(g) TDBSDR(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
// Database routines
virtual int GetMaxSize(PGLOBAL g);
@@ -184,7 +184,7 @@ class TDBSDR : public TDBDIR {
struct _Sub_Dir *Next;
struct _Sub_Dir *Prev;
#if defined(__WIN__)
- int H; // Search handle
+ intptr_t H; // Search handle
#else // !__WIN__
DIR *D;
#endif // !__WIN__
diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp
index 98a476bf94f..1a715819fc8 100644
--- a/storage/connect/tabmysql.cpp
+++ b/storage/connect/tabmysql.cpp
@@ -1,11 +1,11 @@
/************* TabMySQL C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: TABMYSQL */
/* ------------- */
-/* Version 1.9 */
+/* Version 2.0 */
/* */
/* AUTHOR: */
/* ------- */
-/* Olivier BERTRAND 2007-2015 */
+/* Olivier BERTRAND 2007-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -54,9 +54,10 @@
#include "global.h"
#include "plgdbsem.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabcol.h"
#include "colblk.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "tabmysql.h"
#include "valblk.h"
#include "tabutil.h"
@@ -84,16 +85,16 @@ MYSQLDEF::MYSQLDEF(void)
{
Pseudo = 2; // SERVID is Ok but not ROWID
Hostname = NULL;
- Database = NULL;
- Tabname = NULL;
- Srcdef = NULL;
- Username = NULL;
- Password = NULL;
+//Tabschema = NULL;
+//Tabname = NULL;
+//Srcdef = NULL;
+//Username = NULL;
+//Password = NULL;
Portnumber = 0;
Isview = false;
Bind = false;
Delayed = false;
- Xsrc = false;
+//Xsrc = false;
Huge = false;
} // end of MYSQLDEF constructor
@@ -128,7 +129,7 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name)
// TODO: We need to examine which of these can really be NULL
Hostname = PlugDup(g, server->host);
- Database = PlugDup(g, server->db);
+ Tabschema = PlugDup(g, server->db);
Username = PlugDup(g, server->username);
Password = PlugDup(g, server->password);
Portnumber = (server->port) ? server->port : GetDefaultPort();
@@ -200,7 +201,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
Tabname = (b) ? GetStringCatInfo(g, "Tabname", Name) : NULL;
if (trace)
- htrc("server: %s Tabname: %s", url, Tabname);
+ htrc("server: %s TableName: %s", url, Tabname);
Server = url;
return GetServerInfo(g, url);
@@ -253,10 +254,10 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
return true;
} // endif
- if ((Database = strchr(Hostname, '/'))) {
- *Database++ = 0;
+ if ((Tabschema = strchr(Hostname, '/'))) {
+ *Tabschema++ = 0;
- if ((Tabname = strchr(Database, '/'))) {
+ if ((Tabname = strchr(Tabschema, '/'))) {
*Tabname++ = 0;
// Make sure there's not an extra /
@@ -265,7 +266,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
return true;
} // endif /
- } // endif Tabname
+ } // endif TableName
} // endif database
@@ -283,8 +284,8 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
if (Hostname[0] == 0)
Hostname = (b) ? GetStringCatInfo(g, "Host", "localhost") : NULL;
- if (!Database || !*Database)
- Database = (b) ? GetStringCatInfo(g, "Database", "*") : NULL;
+ if (!Tabschema || !*Tabschema)
+ Tabschema = (b) ? GetStringCatInfo(g, "Database", "*") : NULL;
if (!Tabname || !*Tabname)
Tabname = (b) ? GetStringCatInfo(g, "Tabname", Name) : NULL;
@@ -320,7 +321,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
if (!url || !*url) {
// Not using the connection URL
Hostname = GetStringCatInfo(g, "Host", "localhost");
- Database = GetStringCatInfo(g, "Database", "*");
+ Tabschema = GetStringCatInfo(g, "Database", "*");
Tabname = GetStringCatInfo(g, "Name", Name); // Deprecated
Tabname = GetStringCatInfo(g, "Tabname", Tabname);
Username = GetStringCatInfo(g, "User", "*");
@@ -334,7 +335,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
Delayed = !!GetIntCatInfo("Delayed", 0);
} else {
// MYSQL access from a PROXY table
- Database = GetStringCatInfo(g, "Database", Schema ? Schema : PlugDup(g, "*"));
+ Tabschema = GetStringCatInfo(g, "Database", Tabschema ? Tabschema : PlugDup(g, "*"));
Isview = GetBoolCatInfo("View", false);
// We must get other connection parms from the calling table
@@ -348,12 +349,12 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
Portnumber = GetIntCatInfo("Port", GetDefaultPort());
Server = Hostname;
} else {
- char *locdb = Database;
+ char *locdb = Tabschema;
if (ParseURL(g, url))
return true;
- Database = locdb;
+ Tabschema = locdb;
} // endif url
Tabname = Name;
@@ -362,7 +363,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) {
Read_Only = true;
Isview = true;
- } else if (CheckSelf(g, Hc->GetTable()->s, Hostname, Database,
+ } else if (CheckSelf(g, Hc->GetTable()->s, Hostname, Tabschema,
Tabname, Srcdef, Portnumber))
return true;
@@ -372,7 +373,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
// Specific for command executing tables
Xsrc = GetBoolCatInfo("Execsrc", false);
- Mxr = GetIntCatInfo("Maxerr", 0);
+ Maxerr = GetIntCatInfo("Maxerr", 0);
Huge = GetBoolCatInfo("Huge", false);
return false;
} // end of DefineAM
@@ -396,17 +397,17 @@ PTDB MYSQLDEF::GetTable(PGLOBAL g, MODE)
/***********************************************************************/
/* Implementation of the TDBMYSQL class. */
/***********************************************************************/
-TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBASE(tdp)
+TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBEXT(tdp)
{
if (tdp) {
Host = tdp->Hostname;
- Database = tdp->Database;
- Tabname = tdp->Tabname;
- Srcdef = tdp->Srcdef;
- User = tdp->Username;
- Pwd = tdp->Password;
+// Schema = tdp->Tabschema;
+// TableName = tdp->Tabname;
+// Srcdef = tdp->Srcdef;
+// User = tdp->Username;
+// Pwd = tdp->Password;
Server = tdp->Server;
- Qrystr = tdp->Qrystr;
+// Qrystr = tdp->Qrystr;
Quoted = MY_MAX(0, tdp->Quoted);
Port = tdp->Portnumber;
Isview = tdp->Isview;
@@ -415,14 +416,14 @@ TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBASE(tdp)
Myc.m_Use = tdp->Huge;
} else {
Host = NULL;
- Database = NULL;
- Tabname = NULL;
- Srcdef = NULL;
- User = NULL;
- Pwd = NULL;
+// Schema = NULL;
+// TableName = NULL;
+// Srcdef = NULL;
+// User = NULL;
+// Pwd = NULL;
Server = NULL;
- Qrystr = NULL;
- Quoted = 0;
+// Qrystr = NULL;
+// Quoted = 0;
Port = 0;
Isview = false;
Prep = false;
@@ -430,39 +431,40 @@ TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBASE(tdp)
} // endif tdp
Bind = NULL;
- Query = NULL;
+//Query = NULL;
Fetched = false;
m_Rc = RC_FX;
- AftRows = 0;
+//AftRows = 0;
N = -1;
- Nparm = 0;
+//Nparm = 0;
} // end of TDBMYSQL constructor
-TDBMYSQL::TDBMYSQL(PTDBMY tdbp) : TDBASE(tdbp)
+TDBMYSQL::TDBMYSQL(PTDBMY tdbp) : TDBEXT(tdbp)
{
Host = tdbp->Host;
- Database = tdbp->Database;
- Tabname = tdbp->Tabname;
- Srcdef = tdbp->Srcdef;
- User = tdbp->User;
- Pwd = tdbp->Pwd;
- Qrystr = tdbp->Qrystr;
- Quoted = tdbp->Quoted;
+//Schema = tdbp->Schema;
+//TableName = tdbp->TableName;
+//Srcdef = tdbp->Srcdef;
+//User = tdbp->User;
+//Pwd = tdbp->Pwd;
+//Qrystr = tdbp->Qrystr;
+//Quoted = tdbp->Quoted;
+ Server = tdbp->Server;
Port = tdbp->Port;
Isview = tdbp->Isview;
Prep = tdbp->Prep;
Delayed = tdbp->Delayed;
Bind = NULL;
- Query = tdbp->Query;
+//Query = tdbp->Query;
Fetched = tdbp->Fetched;
m_Rc = tdbp->m_Rc;
- AftRows = tdbp->AftRows;
+//AftRows = tdbp->AftRows;
N = tdbp->N;
- Nparm = tdbp->Nparm;
+//Nparm = tdbp->Nparm;
} // end of TDBMYSQL copy constructor
-// Is this really useful ???
-PTDB TDBMYSQL::CopyOne(PTABS t)
+// Is this really useful ??? --> Yes for UPDATE
+PTDB TDBMYSQL::Clone(PTABS t)
{
PTDB tp;
PCOL cp1, cp2;
@@ -477,7 +479,7 @@ PTDB TDBMYSQL::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate MYSQL column description block. */
@@ -504,10 +506,18 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx)
if (Query)
return false; // already done
- if (Srcdef) {
- Query = new(g)STRING(g, 0, Srcdef);
- return false;
- } // endif Srcdef
+ if (Srcdef) {
+ if (strstr(Srcdef, "%s")) {
+ char *fil;
+
+ fil = (To_CondFil) ? To_CondFil->Body : PlugDup(g, "1=1");
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil));
+ } else
+ Query = new(g)STRING(g, 0, Srcdef);
+
+ return false;
+ } // endif Srcdef
// Allocate the string used to contain Query
Query = new(g) STRING(g, 1023, "SELECT ");
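The Srcdef branch added above lets a SRCDEF option embed a %s placeholder that gets replaced by the condition pushed down in To_CondFil->Body, with the always-true filter 1=1 substituted when no condition was pushed. A standalone sketch of that substitution, using plain C string functions instead of CONNECT's STRING class (the table, column and filter below are invented for the example):

#include <cstdio>

int main()
{
  const char *srcdef = "SELECT id, qty FROM sales WHERE %s"; // hypothetical SRCDEF
  const char *filter = "qty > 100";      // stands in for To_CondFil->Body, may be NULL

  char query[256];
  snprintf(query, sizeof(query), srcdef, filter ? filter : "1=1");
  printf("%s\n", query);                 // SELECT id, qty FROM sales WHERE qty > 100
  return 0;
}

The same %s handling is added to TDBODBC::MakeSQL further down in this patch.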
@@ -540,7 +550,7 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx)
oom |= Query->Append(" FROM ");
oom |= Query->Append(tk);
- oom |= Query->Append(Tabname);
+ oom |= Query->Append(TableName);
oom |= Query->Append(tk);
len = Query->GetLength();
@@ -608,7 +618,7 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g)
} // endif colp
// Below 40 is enough to contain the fixed part of the query
- len += (strlen(Tabname) + 40);
+ len += (strlen(TableName) + 40);
Query = new(g) STRING(g, len);
if (Delayed)
@@ -617,7 +627,7 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g)
oom = Query->Set("INSERT INTO ");
oom |= Query->Append(tk);
- oom |= Query->Append(Tabname);
+ oom |= Query->Append(TableName);
oom |= Query->Append("` (");
for (colp = Columns; colp; colp = colp->GetNext()) {
@@ -653,11 +663,11 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g)
/* MakeCommand: make the Update or Delete statement to send to the */
/* MySQL server. Limited to remote values and filtering. */
/***********************************************************************/
-int TDBMYSQL::MakeCommand(PGLOBAL g)
+bool TDBMYSQL::MakeCommand(PGLOBAL g)
{
Query = new(g) STRING(g, strlen(Qrystr) + 64);
- if (Quoted > 0 || stricmp(Name, Tabname)) {
+ if (Quoted > 0 || stricmp(Name, TableName)) {
char *p, *qrystr, name[68];
bool qtd = Quoted > 0;
@@ -678,29 +688,29 @@ int TDBMYSQL::MakeCommand(PGLOBAL g)
if (qtd && *(p-1) == ' ') {
oom |= Query->Append('`');
- oom |= Query->Append(Tabname);
+ oom |= Query->Append(TableName);
oom |= Query->Append('`');
} else
- oom |= Query->Append(Tabname);
+ oom |= Query->Append(TableName);
oom |= Query->Append(Qrystr + (p - qrystr) + strlen(name));
if (oom) {
strcpy(g->Message, "MakeCommand: Out of memory");
- return RC_FX;
+ return true;
} else
strlwr(strcpy(qrystr, Query->GetStr()));
} else {
sprintf(g->Message, "Cannot use this %s command",
(Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
- return RC_FX;
+ return true;
} // endif p
} else
(void)Query->Set(Qrystr);
- return RC_OK;
+ return false;
} // end of MakeCommand
#if 0
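MakeCommand() itself still does what it did before: it rewrites the UPDATE or DELETE text received from MariaDB so that it names the remote table (backquoted when Quoted > 0) instead of the local CONNECT table; only the return convention changes from an RC_* code to bool, where true now signals failure. An illustration with invented names:

//  Local CONNECT table: t1      Remote TableName: customers      Quoted > 0
//
//    Qrystr (received):  UPDATE t1 SET city = 'Lyon' WHERE id = 7
//    Query  (sent out):  UPDATE `customers` SET city = 'Lyon' WHERE id = 7
//
//  Callers now test the bool: OpenDB() below maps a true return to RC_FX.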
@@ -727,7 +737,7 @@ int TDBMYSQL::MakeUpdate(PGLOBAL g)
} // endif sscanf
assert(!stricmp(cmd, "update"));
- strcat(strcat(strcat(strcpy(Query, "UPDATE "), qc), Tabname), qc);
+ strcat(strcat(strcat(strcpy(Query, "UPDATE "), qc), TableName), qc);
strcat(Query, end);
return RC_OK;
} // end of MakeUpdate
@@ -754,7 +764,7 @@ int TDBMYSQL::MakeDelete(PGLOBAL g)
} // endif sscanf
assert(!stricmp(cmd, "delete") && !stricmp(from, "from"));
- strcat(strcat(strcat(strcpy(Query, "DELETE FROM "), qc), Tabname), qc);
+ strcat(strcat(strcat(strcpy(Query, "DELETE FROM "), qc), TableName), qc);
if (*end)
strcat(Query, end);
@@ -776,15 +786,15 @@ int TDBMYSQL::Cardinality(PGLOBAL g)
char query[96];
MYSQLC myc;
- if (myc.Open(g, Host, Database, User, Pwd, Port, csname))
+ if (myc.Open(g, Host, Schema, User, Pwd, Port, csname))
return -1;
strcpy(query, "SELECT COUNT(*) FROM ");
if (Quoted > 0)
- strcat(strcat(strcat(query, "`"), Tabname), "`");
+ strcat(strcat(strcat(query, "`"), TableName), "`");
else
- strcat(query, Tabname);
+ strcat(query, TableName);
Cardinal = myc.GetTableSize(g, query);
myc.Close();
@@ -794,6 +804,7 @@ int TDBMYSQL::Cardinality(PGLOBAL g)
return Cardinal;
} // end of Cardinality
+#if 0
/***********************************************************************/
/* MYSQL GetMaxSize: returns the maximum number of rows in the table. */
/***********************************************************************/
@@ -812,6 +823,7 @@ int TDBMYSQL::GetMaxSize(PGLOBAL g)
return MaxSize;
} // end of GetMaxSize
+#endif // 0
/***********************************************************************/
/* This is a fake routine as ROWID does not exist in MySQL. */
@@ -872,7 +884,7 @@ bool TDBMYSQL::OpenDB(PGLOBAL g)
/* servers allowing concurrency in getting results ??? */
/*********************************************************************/
if (!Myc.Connected()) {
- if (Myc.Open(g, Host, Database, User, Pwd, Port, csname))
+ if (Myc.Open(g, Host, Schema, User, Pwd, Port, csname))
return true;
} // endif Connected
@@ -931,14 +943,14 @@ bool TDBMYSQL::OpenDB(PGLOBAL g)
char cmd[64];
int w;
- sprintf(cmd, "ALTER TABLE `%s` DISABLE KEYS", Tabname);
+ sprintf(cmd, "ALTER TABLE `%s` DISABLE KEYS", TableName);
m_Rc = Myc.ExecSQL(g, cmd, &w); // may fail for some engines
} // endif m_Rc
} else
// m_Rc = (Mode == MODE_DELETE) ? MakeDelete(g) : MakeUpdate(g);
- m_Rc = MakeCommand(g);
+ m_Rc = (MakeCommand(g)) ? RC_FX : RC_OK;
if (m_Rc == RC_FX) {
Myc.Close();
@@ -1030,7 +1042,7 @@ int TDBMYSQL::SendCommand(PGLOBAL g)
if (Myc.ExecSQLcmd(g, Query->GetStr(), &w) == RC_NF) {
AftRows = Myc.m_Afrw;
- sprintf(g->Message, "%s: %d affected rows", Tabname, AftRows);
+ sprintf(g->Message, "%s: %d affected rows", TableName, AftRows);
PushWarning(g, this, 0); // 0 means a Note
if (trace)
@@ -1039,7 +1051,7 @@ int TDBMYSQL::SendCommand(PGLOBAL g)
if (w && Myc.ExecSQL(g, "SHOW WARNINGS") == RC_OK) {
// We got warnings from the remote server
while (Myc.Fetch(g, -1) == RC_OK) {
- sprintf(g->Message, "%s: (%s) %s", Tabname,
+ sprintf(g->Message, "%s: (%s) %s", TableName,
Myc.GetCharField(1), Myc.GetCharField(2));
PushWarning(g, this);
} // endwhile Fetch
@@ -1116,8 +1128,7 @@ int TDBMYSQL::ReadDB(PGLOBAL g)
int rc;
if (trace > 1)
- htrc("MySQL ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n",
- GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex);
+ htrc("MySQL ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode);
if (Mode == MODE_UPDATE || Mode == MODE_DELETE)
return SendCommand(g);
@@ -1205,7 +1216,7 @@ void TDBMYSQL::CloseDB(PGLOBAL g)
PDBUSER dup = PlgGetUser(g);
dup->Step = "Enabling indexes";
- sprintf(cmd, "ALTER TABLE `%s` ENABLE KEYS", Tabname);
+ sprintf(cmd, "ALTER TABLE `%s` ENABLE KEYS", TableName);
Myc.m_Rows = -1; // To execute the query
m_Rc = Myc.ExecSQL(g, cmd, &w); // May fail for some engines
} // endif m_Rc
@@ -1463,7 +1474,7 @@ TDBMYEXC::TDBMYEXC(PMYDEF tdp) : TDBMYSQL(tdp)
Havew = false;
Isw = false;
Warnings = 0;
- Mxr = tdp->Mxr;
+ Mxr = tdp->Maxerr;
Nerr = 0;
} // end of TDBMYEXC constructor
@@ -1479,7 +1490,7 @@ TDBMYEXC::TDBMYEXC(PTDBMYX tdbp) : TDBMYSQL(tdbp)
} // end of TDBMYEXC copy constructor
// Is this really useful ???
-PTDB TDBMYEXC::CopyOne(PTABS t)
+PTDB TDBMYEXC::Clone(PTABS t)
{
PTDB tp;
PCOL cp1, cp2;
@@ -1494,7 +1505,7 @@ PTDB TDBMYEXC::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate MYSQL column description block. */
@@ -1565,7 +1576,7 @@ bool TDBMYEXC::OpenDB(PGLOBAL g)
/* servers allowing concurrency in getting results ??? */
/*********************************************************************/
if (!Myc.Connected())
- if (Myc.Open(g, Host, Database, User, Pwd, Port))
+ if (Myc.Open(g, Host, Schema, User, Pwd, Port))
return true;
Use = USE_OPEN; // Do it now in case we are recursively called
@@ -1728,7 +1739,7 @@ void MYXCOL::WriteColumn(PGLOBAL)
TDBMCL::TDBMCL(PMYDEF tdp) : TDBCAT(tdp)
{
Host = tdp->Hostname;
- Db = tdp->Database;
+ Db = tdp->Tabschema;
Tab = tdp->Tabname;
User = tdp->Username;
Pwd = tdp->Password;
diff --git a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h
index edb15b5cca6..050fa59259b 100644
--- a/storage/connect/tabmysql.h
+++ b/storage/connect/tabmysql.h
@@ -1,4 +1,4 @@
-// TDBMYSQL.H Olivier Bertrand 2007-2014
+// TDBMYSQL.H Olivier Bertrand 2007-2017
#include "myconn.h" // MySQL connection declares
typedef class MYSQLDEF *PMYDEF;
@@ -18,7 +18,7 @@ typedef class MYSQLC *PMYC;
/***********************************************************************/
/* MYSQL table. */
/***********************************************************************/
-class MYSQLDEF : public TABDEF {/* Logical table description */
+class MYSQLDEF : public EXTDEF {/* Logical table description */
friend class TDBMYSQL;
friend class TDBMYEXC;
friend class TDBMCL;
@@ -27,19 +27,18 @@ class MYSQLDEF : public TABDEF {/* Logical table description */
// Constructor
MYSQLDEF(void);
-
// Implementation
virtual const char *GetType(void) {return "MYSQL";}
inline PSZ GetHostname(void) {return Hostname;};
- inline PSZ GetDatabase(void) {return Database;};
- inline PSZ GetTabname(void) {return Tabname;}
- inline PSZ GetSrcdef(void) {return Srcdef;}
- inline PSZ GetUsername(void) {return Username;};
- inline PSZ GetPassword(void) {return Password;};
+//inline PSZ GetDatabase(void) {return Tabschema;};
+//inline PSZ GetTabname(void) {return Tabname;}
+//inline PSZ GetSrcdef(void) {return Srcdef;}
+//inline PSZ GetUsername(void) {return Username;};
+//inline PSZ GetPassword(void) {return Password;};
inline int GetPortnumber(void) {return Portnumber;}
// Methods
- virtual int Indexable(void) {return 2;}
+//virtual int Indexable(void) {return 2;}
virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
virtual PTDB GetTable(PGLOBAL g, MODE m);
bool ParseURL(PGLOBAL g, char *url, bool b = true);
@@ -48,27 +47,27 @@ class MYSQLDEF : public TABDEF {/* Logical table description */
protected:
// Members
PSZ Hostname; /* Host machine to use */
- PSZ Database; /* Database to be used by server */
- PSZ Tabname; /* External table name */
- PSZ Srcdef; /* The source table SQL definition */
- PSZ Username; /* User logon name */
- PSZ Password; /* Password logon info */
+//PSZ Tabschema; /* Database to be used by server */
+//PSZ Tabname; /* External table name */
+//PSZ Srcdef; /* The source table SQL definition */
+//PSZ Username; /* User logon name */
+//PSZ Password; /* Password logon info */
PSZ Server; /* PServerID */
- PSZ Qrystr; /* The original query */
+//PSZ Qrystr; /* The original query */
int Portnumber; /* MySQL port number (0 = default) */
- int Mxr; /* Maxerr for an Exec table */
- int Quoted; /* Identifier quoting level */
+//int Maxerr; /* Maxerr for an Exec table */
+//int Quoted; /* Identifier quoting level */
bool Isview; /* true if this table is a MySQL view */
bool Bind; /* Use prepared statement on insert */
bool Delayed; /* Delayed insert */
- bool Xsrc; /* Execution type */
+//bool Xsrc; /* Execution type */
bool Huge; /* True for big table */
}; // end of MYSQLDEF
/***********************************************************************/
/* This is the class declaration for the MYSQL table. */
/***********************************************************************/
-class TDBMYSQL : public TDBASE {
+class TDBMYSQL : public TDBEXT {
friend class MYSQLCOL;
public:
// Constructor
@@ -80,7 +79,7 @@ class TDBMYSQL : public TDBASE {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBMYSQL(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
//virtual int GetAffectedRows(void) {return AftRows;}
virtual int GetRecpos(void) {return N;}
virtual int GetProgMax(PGLOBAL g);
@@ -88,12 +87,12 @@ class TDBMYSQL : public TDBASE {
virtual int RowNumber(PGLOBAL g, bool b = false);
virtual bool IsView(void) {return Isview;}
virtual PSZ GetServer(void) {return Server;}
- void SetDatabase(LPCSTR db) {Database = (char*)db;}
+ void SetDatabase(LPCSTR db) {Schema = (char*)db;}
- // Database routines
+ // Schema routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual int Cardinality(PGLOBAL g);
- virtual int GetMaxSize(PGLOBAL g);
+//virtual int GetMaxSize(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -111,7 +110,7 @@ class TDBMYSQL : public TDBASE {
bool MakeSelect(PGLOBAL g, bool mx);
bool MakeInsert(PGLOBAL g);
int BindColumns(PGLOBAL g);
- int MakeCommand(PGLOBAL g);
+ virtual bool MakeCommand(PGLOBAL g);
//int MakeUpdate(PGLOBAL g);
//int MakeDelete(PGLOBAL g);
int SendCommand(PGLOBAL g);
@@ -119,25 +118,25 @@ class TDBMYSQL : public TDBASE {
// Members
MYSQLC Myc; // MySQL connection class
MYSQL_BIND *Bind; // To the MySQL bind structure array
- PSTRG Query; // Constructed SQL query
+//PSTRG Query; // Constructed SQL query
char *Host; // Host machine to use
- char *User; // User logon info
- char *Pwd; // Password logon info
- char *Database; // Database to be used by server
- char *Tabname; // External table name
- char *Srcdef; // The source table SQL definition
+//char *User; // User logon info
+//char *Pwd; // Password logon info
+//char *Schema; // Database to be used by server
+//char *TableName; // External table name
+//char *Srcdef; // The source table SQL definition
char *Server; // The server ID
- char *Qrystr; // The original query
+//char *Qrystr; // The original query
bool Fetched; // True when fetch was done
bool Isview; // True if this table is a MySQL view
bool Prep; // Use prepared statement on insert
bool Delayed; // Use delayed insert
int m_Rc; // Return code from command
- int AftRows; // The number of affected rows
+//int AftRows; // The number of affected rows
int N; // The current table index
int Port; // MySQL port number (0 = default)
- int Nparm; // The number of statement parameters
- int Quoted; // The identifier quoting level
+//int Nparm; // The number of statement parameters
+//int Quoted; // The identifier quoting level
}; // end of class TDBMYSQL
/***********************************************************************/
@@ -162,9 +161,6 @@ class MYSQLCOL : public COLBLK {
bool FindRank(PGLOBAL g);
protected:
- // Default constructor not to be used
- MYSQLCOL(void) {}
-
// Members
MYSQL_BIND *Bind; // This column bind structure pointer
PVAL To_Val; // To value used for Update/Insert
@@ -187,7 +183,7 @@ class TDBMYEXC : public TDBMYSQL {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBMYEXC(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual bool IsView(void) {return Isview;}
// Database routines
@@ -228,9 +224,6 @@ class MYXCOL : public MYSQLCOL {
virtual void WriteColumn(PGLOBAL g);
protected:
- // Default constructor not to be used
- MYXCOL(void) {}
-
// Members
char *Buffer; // To get returned message
int Flag; // Column content desc
diff --git a/storage/connect/taboccur.cpp b/storage/connect/taboccur.cpp
index 07e260154e0..07272d1b298 100644
--- a/storage/connect/taboccur.cpp
+++ b/storage/connect/taboccur.cpp
@@ -1,7 +1,7 @@
/************ TabOccur CPP Declares Source Code File (.CPP) ************/
-/* Name: TABOCCUR.CPP Version 1.1 */
+/* Name: TABOCCUR.CPP Version 1.2 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2013 - 2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2013 - 2017 */
/* */
/* OCCUR: Table that provides a view of a source table where the */
/* content of several columns of the source table is placed in only */
@@ -39,12 +39,13 @@
/***********************************************************************/
#include "global.h"
#include "plgdbsem.h"
-#include "reldef.h"
+#include "xtable.h"
+#include "tabext.h"
+//#include "reldef.h"
#include "filamtxt.h"
#include "tabdos.h"
#include "tabcol.h"
#include "taboccur.h"
-#include "xtable.h"
#include "tabmysql.h"
#include "ha_connect.h"
diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp
index f3ffc99ac15..488acdd330d 100644
--- a/storage/connect/tabodbc.cpp
+++ b/storage/connect/tabodbc.cpp
@@ -67,10 +67,11 @@
#include "plgdbsem.h"
#include "mycat.h"
#include "xtable.h"
+#include "tabext.h"
#include "odbccat.h"
#include "tabodbc.h"
#include "tabmul.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "tabcol.h"
#include "valblk.h"
#include "ha_connect.h"
@@ -95,10 +96,9 @@ bool ExactInfo(void);
/***********************************************************************/
ODBCDEF::ODBCDEF(void)
{
- Connect = Tabname = Tabschema = Username = Password = NULL;
- Tabcat = Colpat = Srcdef = Qchar = Qrystr = Sep = NULL;
- Catver = Options = Cto = Qto = Quoted = Maxerr = Maxres = Memory = 0;
- Scrollable = Xsrc = UseCnc = false;
+ Connect = NULL;
+ Catver = 0;
+ UseCnc = false;
} // end of ODBCDEF constructor
/***********************************************************************/
@@ -113,47 +113,50 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
return true;
} // endif Connect
- Tabname = GetStringCatInfo(g, "Name",
- (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
- Tabname = GetStringCatInfo(g, "Tabname", Tabname);
- Tabschema = GetStringCatInfo(g, "Dbname", NULL);
- Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
- Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
- Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
- Username = GetStringCatInfo(g, "User", NULL);
- Password = GetStringCatInfo(g, "Password", NULL);
-
- if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
- Read_Only = true;
-
- Qrystr = GetStringCatInfo(g, "Query_String", "?");
- Sep = GetStringCatInfo(g, "Separator", NULL);
+ if (EXTDEF::DefineAM(g, am, poff))
+ return true;
+
+ // Tabname = GetStringCatInfo(g, "Name",
+ // (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
+ // Tabname = GetStringCatInfo(g, "Tabname", Tabname);
+ // Tabschema = GetStringCatInfo(g, "Dbname", NULL);
+ // Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
+ // Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
+ // Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
+ //Username = GetStringCatInfo(g, "User", NULL);
+ // Password = GetStringCatInfo(g, "Password", NULL);
+
+ // if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
+ // Read_Only = true;
+
+ // Qrystr = GetStringCatInfo(g, "Query_String", "?");
+ // Sep = GetStringCatInfo(g, "Separator", NULL);
Catver = GetIntCatInfo("Catver", 2);
- Xsrc = GetBoolCatInfo("Execsrc", FALSE);
- Maxerr = GetIntCatInfo("Maxerr", 0);
- Maxres = GetIntCatInfo("Maxres", 0);
- Quoted = GetIntCatInfo("Quoted", 0);
+ //Xsrc = GetBoolCatInfo("Execsrc", FALSE);
+ //Maxerr = GetIntCatInfo("Maxerr", 0);
+ //Maxres = GetIntCatInfo("Maxres", 0);
+ //Quoted = GetIntCatInfo("Quoted", 0);
Options = ODBConn::noOdbcDialog;
//Options = ODBConn::noOdbcDialog | ODBConn::useCursorLib;
Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT);
Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
- if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt)
- Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch
+ //if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt)
+ // Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch
- if (Catfunc == FNC_COL)
- Colpat = GetStringCatInfo(g, "Colpat", NULL);
+ //if (Catfunc == FNC_COL)
+ // Colpat = GetStringCatInfo(g, "Colpat", NULL);
- if (Catfunc == FNC_TABLE)
- Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
+ //if (Catfunc == FNC_TABLE)
+ // Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
UseCnc = GetBoolCatInfo("UseDSN", false);
// Memory was Boolean, it is now integer
- if (!(Memory = GetIntCatInfo("Memory", 0)))
- Memory = GetBoolCatInfo("Memory", false) ? 1 : 0;
+ //if (!(Memory = GetIntCatInfo("Memory", 0)))
+ // Memory = GetBoolCatInfo("Memory", false) ? 1 : 0;
- Pseudo = 2; // FILID is Ok but not ROWID
+ //Pseudo = 2; // FILID is Ok but not ROWID
return false;
} // end of DefineAM
@@ -162,7 +165,7 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
/***********************************************************************/
PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m)
{
- PTDBASE tdbp = NULL;
+ PTDB tdbp = NULL;
/*********************************************************************/
/* Allocate a TDB of the proper type. */
@@ -200,103 +203,103 @@ PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m)
/***********************************************************************/
/* Implementation of the TDBODBC class. */
/***********************************************************************/
-TDBODBC::TDBODBC(PODEF tdp) : TDBASE(tdp)
+TDBODBC::TDBODBC(PODEF tdp) : TDBEXT(tdp)
{
Ocp = NULL;
Cnp = NULL;
if (tdp) {
Connect = tdp->Connect;
- TableName = tdp->Tabname;
- Schema = tdp->Tabschema;
+ //TableName = tdp->Tabname;
+ //Schema = tdp->Tabschema;
Ops.User = tdp->Username;
Ops.Pwd = tdp->Password;
- Catalog = tdp->Tabcat;
- Srcdef = tdp->Srcdef;
- Qrystr = tdp->Qrystr;
- Sep = tdp->GetSep();
- Options = tdp->Options;
+ //Catalog = tdp->Tabcat;
+ //Srcdef = tdp->Srcdef;
+ //Qrystr = tdp->Qrystr;
+ //Sep = tdp->GetSep();
+ //Options = tdp->Options;
Ops.Cto = tdp->Cto;
Ops.Qto = tdp->Qto;
- Quoted = MY_MAX(0, tdp->GetQuoted());
- Rows = tdp->GetElemt();
+ //Quoted = MY_MAX(0, tdp->GetQuoted());
+ //Rows = tdp->GetElemt();
Catver = tdp->Catver;
- Memory = tdp->Memory;
- Scrollable = tdp->Scrollable;
+ //Memory = tdp->Memory;
+ //Scrollable = tdp->Scrollable;
Ops.UseCnc = tdp->UseCnc;
} else {
Connect = NULL;
- TableName = NULL;
- Schema = NULL;
+ //TableName = NULL;
+ //Schema = NULL;
Ops.User = NULL;
Ops.Pwd = NULL;
- Catalog = NULL;
- Srcdef = NULL;
- Qrystr = NULL;
- Sep = 0;
- Options = 0;
+ //Catalog = NULL;
+ //Srcdef = NULL;
+ //Qrystr = NULL;
+ //Sep = 0;
+ //Options = 0;
Ops.Cto = DEFAULT_LOGIN_TIMEOUT;
Ops.Qto = DEFAULT_QUERY_TIMEOUT;
- Quoted = 0;
- Rows = 0;
+ //Quoted = 0;
+ //Rows = 0;
Catver = 0;
- Memory = 0;
- Scrollable = false;
+ //Memory = 0;
+ //Scrollable = false;
Ops.UseCnc = false;
} // endif tdp
- Quote = NULL;
- Query = NULL;
- Count = NULL;
+ //Quote = NULL;
+ //Query = NULL;
+ //Count = NULL;
//Where = NULL;
- MulConn = NULL;
- DBQ = NULL;
- Qrp = NULL;
- Fpos = 0;
- Curpos = 0;
- AftRows = 0;
- CurNum = 0;
- Rbuf = 0;
- BufSize = 0;
- Nparm = 0;
- Placed = false;
+ //MulConn = NULL;
+ //DBQ = NULL;
+ //Qrp = NULL;
+ //Fpos = 0;
+ //Curpos = 0;
+ //AftRows = 0;
+ //CurNum = 0;
+ //Rbuf = 0;
+ //BufSize = 0;
+ //Nparm = 0;
+ //Placed = false;
} // end of TDBODBC standard constructor
-TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBASE(tdbp)
+TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp)
{
Ocp = tdbp->Ocp; // is that right ?
Cnp = tdbp->Cnp;
Connect = tdbp->Connect;
- TableName = tdbp->TableName;
- Schema = tdbp->Schema;
+ //TableName = tdbp->TableName;
+ //Schema = tdbp->Schema;
Ops = tdbp->Ops;
- Catalog = tdbp->Catalog;
- Srcdef = tdbp->Srcdef;
- Qrystr = tdbp->Qrystr;
- Memory = tdbp->Memory;
- Scrollable = tdbp->Scrollable;
- Quote = tdbp->Quote;
- Query = tdbp->Query;
- Count = tdbp->Count;
+ //Catalog = tdbp->Catalog;
+ //Srcdef = tdbp->Srcdef;
+ //Qrystr = tdbp->Qrystr;
+ //Memory = tdbp->Memory;
+ //Scrollable = tdbp->Scrollable;
+ //Quote = tdbp->Quote;
+ //Query = tdbp->Query;
+ //Count = tdbp->Count;
//Where = tdbp->Where;
- MulConn = tdbp->MulConn;
- DBQ = tdbp->DBQ;
- Options = tdbp->Options;
- Quoted = tdbp->Quoted;
- Rows = tdbp->Rows;
- Fpos = 0;
- Curpos = 0;
- AftRows = 0;
- CurNum = 0;
- Rbuf = 0;
- BufSize = tdbp->BufSize;
- Nparm = tdbp->Nparm;
- Qrp = tdbp->Qrp;
- Placed = false;
+ //MulConn = tdbp->MulConn;
+ //DBQ = tdbp->DBQ;
+ //Options = tdbp->Options;
+ //Quoted = tdbp->Quoted;
+ //Rows = tdbp->Rows;
+ //Fpos = 0;
+ //Curpos = 0;
+ //AftRows = 0;
+ //CurNum = 0;
+ //Rbuf = 0;
+ //BufSize = tdbp->BufSize;
+ //Nparm = tdbp->Nparm;
+ //Qrp = tdbp->Qrp;
+ //Placed = false;
} // end of TDBODBC copy constructor
// Method
-PTDB TDBODBC::CopyOne(PTABS t)
+PTDB TDBODBC::Clone(PTABS t)
{
PTDB tp;
PODBCCOL cp1, cp2;
@@ -386,6 +389,7 @@ void TDBODBC::SetFile(PGLOBAL g, PSZ fn)
DBQ = fn;
} // end of SetFile
+#if 0
/******************************************************************/
/* Convert an UTF-8 string to latin characters. */
/******************************************************************/
@@ -414,7 +418,15 @@ bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt)
PCOL colp;
if (Srcdef) {
- Query = new(g)STRING(g, 0, Srcdef);
+ if (strstr(Srcdef, "%s")) {
+ char *fil;
+
+ fil = (To_CondFil) ? To_CondFil->Body : PlugDup(g, "1=1");
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil));
+ } else
+ Query = new(g)STRING(g, 0, Srcdef);
+
return false;
} // endif Srcdef
@@ -442,7 +454,8 @@ bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt)
} else
oom |= Query->Append(buf);
- } // endif colp
+ ((PEXTCOL)colp)->SetRank(++Ncol);
+ } // endif colp
} else
// !Columns can occur for queries such as: select count(*) from...
@@ -458,10 +471,6 @@ bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt)
if (Catalog && *Catalog)
catp = Catalog;
- // Following lines are commented because of MSDEV-10520
- // Indeed the schema in the tablep is the local table database and
- // is normally not related to the remote table database.
- // TODO: Try to remember why this was done and if it was useful in some case.
//if (tablep->GetSchema())
// schmp = (char*)tablep->GetSchema();
//else
@@ -516,6 +525,7 @@ bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt)
return false;
} // end of MakeSQL
+#endif // 0
/***********************************************************************/
/* MakeInsert: make the Insert statement used with ODBC connection. */
@@ -536,7 +546,7 @@ bool TDBODBC::MakeInsert(PGLOBAL g)
// Column name can be encoded in UTF-8
Decode(colp->GetName(), buf, sizeof(buf));
len += (strlen(buf) + 6); // comma + quotes + valist
- ((PODBCCOL)colp)->Rank = ++Nparm;
+ ((PEXTCOL)colp)->SetRank(++Nparm);
} // endif colp
// Below 32 is enough to contain the fixed part of the query
@@ -555,7 +565,7 @@ bool TDBODBC::MakeInsert(PGLOBAL g)
if (schmp)
len += strlen(schmp) + 1;
- // Column name can be encoded in UTF-8
+ // Table name can be encoded in UTF-8
Decode(TableName, buf, sizeof(buf));
len += (strlen(buf) + 32);
Query = new(g) STRING(g, len, "INSERT INTO ");
@@ -634,6 +644,7 @@ bool TDBODBC::BindParameters(PGLOBAL g)
return false;
} // end of BindParameters
+#if 0
/***********************************************************************/
/* MakeCommand: make the Update or Delete statement to send to the */
/* MySQL server. Limited to remote values and filtering. */
@@ -664,19 +675,20 @@ bool TDBODBC::MakeCommand(PGLOBAL g)
// If so, it must be quoted in the original query
strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
- if (!strstr(" update delete low_priority ignore quick from ", name))
- strlwr(strcpy(name, Name)); // Not a keyword
- else
- strlwr(strcat(strcat(strcpy(name, qc), Name), qc));
+ if (strstr(" update delete low_priority ignore quick from ", name)) {
+ strlwr(strcat(strcat(strcpy(name, qc), Name), qc));
+ k += 2;
+ } else
+ strlwr(strcpy(name, Name)); // Not a keyword
if ((p = strstr(qrystr, name))) {
for (i = 0; i < p - qrystr; i++)
stmt[i] = (Qrystr[i] == '`') ? *qc : Qrystr[i];
stmt[i] = 0;
- k = i + (int)strlen(Name);
+ k += i + (int)strlen(Name);
- if (qtd && *(p-1) == ' ')
+ if (qtd && *(p - 1) == ' ')
strcat(strcat(strcat(stmt, qc), TableName), qc);
else
strcat(stmt, TableName);
@@ -692,15 +704,14 @@ bool TDBODBC::MakeCommand(PGLOBAL g)
} else {
sprintf(g->Message, "Cannot use this %s command",
- (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
- return false;
+ (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
+ return true;
} // endif p
Query = new(g) STRING(g, 0, stmt);
return (!Query->GetSize());
} // end of MakeCommand
-#if 0
/***********************************************************************/
/* MakeUpdate: make the SQL statement to send to ODBC connection. */
/***********************************************************************/
@@ -818,6 +829,7 @@ int TDBODBC::Cardinality(PGLOBAL g)
return Cardinal;
} // end of Cardinality
+#if 0
/***********************************************************************/
/* ODBC GetMaxSize: returns table size estimate in number of lines. */
/***********************************************************************/
@@ -844,6 +856,7 @@ int TDBODBC::GetProgMax(PGLOBAL g)
{
return GetMaxSize(g);
} // end of GetProgMax
+#endif // 0
/***********************************************************************/
/* ODBC Access Method opening routine. */
@@ -981,6 +994,7 @@ bool TDBODBC::OpenDB(PGLOBAL g)
return false;
} // end of OpenDB
+#if 0
/***********************************************************************/
/* GetRecpos: return the position of last read record. */
/***********************************************************************/
@@ -988,6 +1002,7 @@ int TDBODBC::GetRecpos(void)
{
return Fpos;
} // end of GetRecpos
+#endif // 0
/***********************************************************************/
/* SetRecpos: set the position of next read record. */
@@ -1081,8 +1096,9 @@ int TDBODBC::ReadDB(PGLOBAL g)
int rc;
if (trace > 1)
- htrc("ODBC ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n",
- GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex);
+ htrc("ODBC ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode);
+ //htrc("ODBC ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n",
+ // GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex);
if (Mode == MODE_UPDATE || Mode == MODE_DELETE) {
if (!Query && MakeCommand(g))
@@ -1102,11 +1118,11 @@ int TDBODBC::ReadDB(PGLOBAL g)
} // endif Mode
- if (To_Kindex) {
- // Direct access of ODBC tables is not implemented yet
- strcpy(g->Message, MSG(NO_ODBC_DIRECT));
- return RC_FX;
- } // endif To_Kindex
+ //if (To_Kindex) {
+ // // Direct access of ODBC tables is not implemented yet
+ // strcpy(g->Message, MSG(NO_ODBC_DIRECT));
+ // return RC_FX;
+ // } // endif To_Kindex
/*********************************************************************/
/* Now start the reading process. */
@@ -1212,70 +1228,58 @@ void TDBODBC::CloseDB(PGLOBAL g)
/* ODBCCOL public constructor. */
/***********************************************************************/
ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
- : COLBLK(cdp, tdbp, i)
+ : EXTCOL(cdp, tdbp, cprec, i, am)
{
- if (cprec) {
- Next = cprec->GetNext();
- cprec->SetNext(this);
- } else {
- Next = tdbp->GetColumns();
- tdbp->SetColumns(this);
- } // endif cprec
-
// Set additional ODBC access method information for column.
- Crp = NULL;
-//Long = cdp->GetLong();
- Long = Precision;
+//Crp = NULL;
+//Long = Precision;
//strcpy(F_Date, cdp->F_Date);
- To_Val = NULL;
+//To_Val = NULL;
Slen = 0;
StrLen = &Slen;
Sqlbuf = NULL;
- Bufp = NULL;
- Blkp = NULL;
- Rank = 0; // Not known yet
-
- if (trace)
- htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this);
-
+//Bufp = NULL;
+//Blkp = NULL;
+//Rank = 0; // Not known yet
} // end of ODBCCOL constructor
/***********************************************************************/
/* ODBCCOL private constructor. */
/***********************************************************************/
-ODBCCOL::ODBCCOL(void) : COLBLK()
+ODBCCOL::ODBCCOL(void) : EXTCOL()
{
- Crp = NULL;
- Buf_Type = TYPE_INT; // This is a count(*) column
- // Set additional Dos access method information for column.
- Long = sizeof(int);
- To_Val = NULL;
+//Crp = NULL;
+//Buf_Type = TYPE_INT; // This is a count(*) column
+//// Set additional Dos access method information for column.
+//Long = sizeof(int);
+//To_Val = NULL;
Slen = 0;
StrLen = &Slen;
Sqlbuf = NULL;
- Bufp = NULL;
- Blkp = NULL;
- Rank = 1;
+//Bufp = NULL;
+//Blkp = NULL;
+//Rank = 1;
} // end of ODBCCOL constructor
/***********************************************************************/
/* ODBCCOL constructor used for copying columns. */
/* tdbp is the pointer to the new table descriptor. */
/***********************************************************************/
-ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp)
+ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp)
{
- Crp = col1->Crp;
- Long = col1->Long;
+//Crp = col1->Crp;
+//Long = col1->Long;
//strcpy(F_Date, col1->F_Date);
- To_Val = col1->To_Val;
+//To_Val = col1->To_Val;
Slen = col1->Slen;
StrLen = col1->StrLen;
Sqlbuf = col1->Sqlbuf;
- Bufp = col1->Bufp;
- Blkp = col1->Blkp;
- Rank = col1->Rank;
+//Bufp = col1->Bufp;
+//Blkp = col1->Blkp;
+//Rank = col1->Rank;
} // end of ODBCCOL copy constructor
+#if 0
/***********************************************************************/
/* SetBuffer: prepare a column block for write operation. */
/***********************************************************************/
@@ -1321,6 +1325,7 @@ bool ODBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
Status = (ok) ? BUF_EMPTY : BUF_NO;
return false;
} // end of SetBuffer
+#endif // 0
/***********************************************************************/
/* ReadColumn: when SQLFetch is used there is nothing to do as the */
@@ -1526,7 +1531,7 @@ TDBXDBC::TDBXDBC(PTDBXDBC tdbp) : TDBODBC(tdbp)
Nerr = tdbp->Nerr;
} // end of TDBXDBC copy constructor
-PTDB TDBXDBC::CopyOne(PTABS t)
+PTDB TDBXDBC::Clone(PTABS t)
{
PTDB tp;
PXSRCCOL cp1, cp2;
diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h
index aa6592d8abf..fcefad5647b 100644
--- a/storage/connect/tabodbc.h
+++ b/storage/connect/tabodbc.h
@@ -20,7 +20,7 @@ typedef class TDBSRC *PTDBSRC;
/***********************************************************************/
/* ODBC table. */
/***********************************************************************/
-class DllExport ODBCDEF : public TABDEF { /* Logical table description */
+class DllExport ODBCDEF : public EXTDEF { /* Logical table description */
friend class TDBODBC;
friend class TDBXDBC;
friend class TDBDRV;
@@ -33,14 +33,14 @@ public:
// Implementation
virtual const char *GetType(void) {return "ODBC";}
PSZ GetConnect(void) {return Connect;}
- PSZ GetTabname(void) {return Tabname;}
- PSZ GetTabschema(void) {return Tabschema;}
- PSZ GetTabcat(void) {return Tabcat;}
- PSZ GetSrcdef(void) {return Srcdef;}
- char GetSep(void) {return (Sep) ? *Sep : 0;}
- int GetQuoted(void) {return Quoted;}
+ //PSZ GetTabname(void) {return Tabname;}
+ //PSZ GetTabschema(void) {return Tabschema;}
+ //PSZ GetTabcat(void) {return Tabcat;}
+ //PSZ GetSrcdef(void) {return Srcdef;}
+ //char GetSep(void) {return (Sep) ? *Sep : 0;}
+ //int GetQuoted(void) {return Quoted;}
int GetCatver(void) {return Catver;}
- int GetOptions(void) {return Options;}
+ //int GetOptions(void) {return Options;}
// Methods
virtual int Indexable(void) {return 2;}
@@ -50,27 +50,27 @@ public:
protected:
// Members
PSZ Connect; /* ODBC connection string */
- PSZ Tabname; /* External table name */
- PSZ Tabschema; /* External table schema */
- PSZ Username; /* User connect name */
- PSZ Password; /* Password connect info */
- PSZ Tabcat; /* External table catalog */
- PSZ Tabtyp; /* Catalog table type */
- PSZ Colpat; /* Catalog column pattern */
- PSZ Srcdef; /* The source table SQL definition */
- PSZ Qchar; /* Identifier quoting character */
- PSZ Qrystr; /* The original query */
- PSZ Sep; /* Decimal separator */
+ //PSZ Tabname; /* External table name */
+ //PSZ Tabschema; /* External table schema */
+ //PSZ Username; /* User connect name */
+ //PSZ Password; /* Password connect info */
+ //PSZ Tabcat; /* External table catalog */
+ //PSZ Tabtyp; /* Catalog table type */
+ //PSZ Colpat; /* Catalog column pattern */
+ //PSZ Srcdef; /* The source table SQL definition */
+ //PSZ Qchar; /* Identifier quoting character */
+ //PSZ Qrystr; /* The original query */
+ //PSZ Sep; /* Decimal separator */
int Catver; /* ODBC version for catalog functions */
- int Options; /* Open connection options */
- int Cto; /* Open connection timeout */
- int Qto; /* Query (command) timeout */
- int Quoted; /* Identifier quoting level */
- int Maxerr; /* Maxerr for an Exec table */
- int Maxres; /* Maxres for a catalog table */
- int Memory; /* Put result set in memory */
- bool Scrollable; /* Use scrollable cursor */
- bool Xsrc; /* Execution type */
+ //int Options; /* Open connection options */
+ //int Cto; /* Open connection timeout */
+ //int Qto; /* Query (command) timeout */
+ //int Quoted; /* Identifier quoting level */
+ //int Maxerr; /* Maxerr for an Exec table */
+ //int Maxres; /* Maxres for a catalog table */
+ //int Memory; /* Put result set in memory */
+ //bool Scrollable; /* Use scrollable cursor */
+ //bool Xsrc; /* Execution type */
bool UseCnc; /* Use SQLConnect (!SQLDriverConnect) */
}; // end of ODBCDEF
@@ -81,7 +81,7 @@ public:
/* This is the ODBC Access Method class declaration for files from */
/* other DB drivers to be accessed via ODBC. */
/***********************************************************************/
-class TDBODBC : public TDBASE {
+class TDBODBC : public TDBEXT {
friend class ODBCCOL;
friend class ODBConn;
public:
@@ -95,8 +95,8 @@ class TDBODBC : public TDBASE {
{return (PTDB)new(g) TDBODBC(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
- virtual int GetRecpos(void);
+ virtual PTDB Clone(PTABS t);
+//virtual int GetRecpos(void);
virtual bool SetRecpos(PGLOBAL g, int recpos);
virtual PSZ GetFile(PGLOBAL g);
virtual void SetFile(PGLOBAL g, PSZ fn);
@@ -108,8 +108,8 @@ class TDBODBC : public TDBASE {
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual int Cardinality(PGLOBAL g);
- virtual int GetMaxSize(PGLOBAL g);
- virtual int GetProgMax(PGLOBAL g);
+//virtual int GetMaxSize(PGLOBAL g);
+//virtual int GetProgMax(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -119,10 +119,10 @@ class TDBODBC : public TDBASE {
protected:
// Internal functions
- int Decode(char *utf, char *buf, size_t n);
- bool MakeSQL(PGLOBAL g, bool cnt);
+//int Decode(char *utf, char *buf, size_t n);
+//bool MakeSQL(PGLOBAL g, bool cnt);
bool MakeInsert(PGLOBAL g);
- bool MakeCommand(PGLOBAL g);
+//virtual bool MakeCommand(PGLOBAL g);
//bool MakeFilter(PGLOBAL g, bool c);
bool BindParameters(PGLOBAL g);
//char *MakeUpdate(PGLOBAL g);
@@ -132,46 +132,16 @@ class TDBODBC : public TDBASE {
ODBConn *Ocp; // Points to an ODBC connection class
ODBCCOL *Cnp; // Points to count(*) column
ODBCPARM Ops; // Additional parameters
- PSTRG Query; // Constructed SQL query
char *Connect; // Points to connection string
- char *TableName; // Points to ODBC table name
- char *Schema; // Points to ODBC table Schema
- char *User; // User connect info
- char *Pwd; // Password connect info
- char *Catalog; // Points to ODBC table Catalog
- char *Srcdef; // The source table SQL definition
- char *Count; // Points to count(*) SQL statement
-//char *Where; // Points to local where clause
- char *Quote; // The identifier quoting character
- char *MulConn; // Used for multiple ODBC tables
- char *DBQ; // The address part of Connect string
- char *Qrystr; // The original query
- char Sep; // The decimal separator
- int Options; // Connect options
- int Cto; // Connect timeout
- int Qto; // Query timeout
- int Quoted; // The identifier quoting level
- int Fpos; // Position of last read record
- int Curpos; // Cursor position of last fetch
- int AftRows; // The number of affected rows
- int Rows; // Rowset size
int Catver; // Catalog ODBC version
- int CurNum; // Current buffer line number
- int Rbuf; // Number of lines read in buffer
- int BufSize; // Size of connect string buffer
- int Nparm; // The number of statement parameters
- int Memory; // 0: No 1: Alloc 2: Put 3: Get
- bool Scrollable; // Use scrollable cursor
- bool Placed; // True for position reading
bool UseCnc; // Use SQLConnect (!SQLDriverConnect)
- PQRYRES Qrp; // Points to storage result
}; // end of class TDBODBC
/***********************************************************************/
/* Class ODBCCOL: ODBC access method column descriptor. */
/* This A.M. is used for ODBC tables. */
/***********************************************************************/
-class ODBCCOL : public COLBLK {
+class ODBCCOL : public EXTCOL {
friend class TDBODBC;
public:
// Constructors
@@ -181,12 +151,12 @@ class ODBCCOL : public COLBLK {
// Implementation
virtual int GetAmType(void) {return TYPE_AM_ODBC;}
SQLLEN *GetStrLen(void) {return StrLen;}
- int GetRank(void) {return Rank;}
+// int GetRank(void) {return Rank;}
// PVBLK GetBlkp(void) {return Blkp;}
- void SetCrp(PCOLRES crp) {Crp = crp;}
+// void SetCrp(PCOLRES crp) {Crp = crp;}
// Methods
- virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+//virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
virtual void ReadColumn(PGLOBAL g);
virtual void WriteColumn(PGLOBAL g);
void AllocateBuffers(PGLOBAL g, int rows);
@@ -195,19 +165,19 @@ class ODBCCOL : public COLBLK {
// void Print(PGLOBAL g, FILE *, uint);
protected:
- // Constructor used by GetMaxSize
+ // Constructor for count(*) column
ODBCCOL(void);
// Members
TIMESTAMP_STRUCT *Sqlbuf; // To get SQL_TIMESTAMP's
- PCOLRES Crp; // To storage result
- void *Bufp; // To extended buffer
- PVBLK Blkp; // To Value Block
+//PCOLRES Crp; // To storage result
+//void *Bufp; // To extended buffer
+//PVBLK Blkp; // To Value Block
//char F_Date[12]; // Internal Date format
- PVAL To_Val; // To value used for Insert
+//PVAL To_Val; // To value used for Insert
SQLLEN *StrLen; // As returned by ODBC
SQLLEN Slen; // Used with Fetch
- int Rank; // Rank (position) number in the query
+//int Rank; // Rank (position) number in the query
}; // end of class ODBCCOL
/***********************************************************************/
@@ -228,28 +198,19 @@ class TDBXDBC : public TDBODBC {
{return (PTDB)new(g) TDBXDBC(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
-//virtual int GetRecpos(void);
-//virtual PSZ GetFile(PGLOBAL g);
-//virtual void SetFile(PGLOBAL g, PSZ fn);
-//virtual void ResetSize(void);
-//virtual int GetAffectedRows(void) {return AftRows;}
-//virtual PSZ GetServer(void) {return "ODBC";}
+ virtual PTDB Clone(PTABS t);
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
-//virtual int GetProgMax(PGLOBAL g);
virtual int GetMaxSize(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
virtual int DeleteDB(PGLOBAL g, int irc);
-//virtual void CloseDB(PGLOBAL g);
protected:
// Internal functions
PCMD MakeCMD(PGLOBAL g);
-//bool BindParameters(PGLOBAL g);
// Members
PCMD Cmdlist; // The commands to execute
diff --git a/storage/connect/tabpivot.cpp b/storage/connect/tabpivot.cpp
index 256b454741c..c6d32884417 100644
--- a/storage/connect/tabpivot.cpp
+++ b/storage/connect/tabpivot.cpp
@@ -1,11 +1,11 @@
/************ TabPivot C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: TABPIVOT */
/* ------------- */
-/* Version 1.6 */
+/* Version 1.7 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -41,6 +41,7 @@
#include "global.h"
#include "plgdbsem.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabcol.h"
#include "colblk.h"
#include "tabmysql.h"
@@ -883,7 +884,7 @@ SRCCOL::SRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int n)
/***********************************************************************/
/* Initialize the column as pointing to the source column. */
/***********************************************************************/
-bool SRCCOL::Init(PGLOBAL g, PTDBASE tp)
+bool SRCCOL::Init(PGLOBAL g, PTDB tp)
{
if (PRXCOL::Init(g, tp))
return true;
diff --git a/storage/connect/tabpivot.h b/storage/connect/tabpivot.h
index c397af05234..07d5c3e456b 100644
--- a/storage/connect/tabpivot.h
+++ b/storage/connect/tabpivot.h
@@ -183,7 +183,7 @@ class SRCCOL : public PRXCOL {
using PRXCOL::Init;
virtual void Reset(void) {}
void SetColumn(void);
- virtual bool Init(PGLOBAL g, PTDBASE tp);
+ virtual bool Init(PGLOBAL g, PTDB tp);
bool CompareLast(void);
protected:
diff --git a/storage/connect/tabsys.cpp b/storage/connect/tabsys.cpp
index 76890e84429..2ddd1c3c753 100644
--- a/storage/connect/tabsys.cpp
+++ b/storage/connect/tabsys.cpp
@@ -159,7 +159,7 @@ TDBINI::TDBINI(PTDBINI tdbp) : TDBASE(tdbp)
} // end of TDBINI copy constructor
// Is this really useful ???
-PTDB TDBINI::CopyOne(PTABS t)
+PTDB TDBINI::Clone(PTABS t)
{
PTDB tp;
PINICOL cp1, cp2;
@@ -173,7 +173,7 @@ PTDB TDBINI::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Get the section list from the INI file. */
@@ -565,7 +565,7 @@ TDBXIN::TDBXIN(PTDBXIN tdbp) : TDBINI(tdbp)
} // end of TDBXIN copy constructor
// Is this really useful ???
-PTDB TDBXIN::CopyOne(PTABS t)
+PTDB TDBXIN::Clone(PTABS t)
{
PTDB tp;
PXINCOL cp1, cp2;
@@ -579,7 +579,7 @@ PTDB TDBXIN::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Get the key list from the INI file. */
diff --git a/storage/connect/tabsys.h b/storage/connect/tabsys.h
index 6b454322906..ff1b8335690 100644
--- a/storage/connect/tabsys.h
+++ b/storage/connect/tabsys.h
@@ -57,7 +57,7 @@ class TDBINI : public TDBASE {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBINI(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void) {return N;}
virtual int GetProgCur(void) {return N;}
//virtual int GetAffectedRows(void) {return 0;}
@@ -136,7 +136,7 @@ class TDBXIN : public TDBINI {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBXIN(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void);
virtual bool SetRecpos(PGLOBAL g, int recpos);
virtual void ResetDB(void)
diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp
index e3baf7c3da5..0bf3f6beb43 100644
--- a/storage/connect/tabtbl.cpp
+++ b/storage/connect/tabtbl.cpp
@@ -1,11 +1,11 @@
/************* TabTbl C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABTBL */
/* ------------- */
-/* Version 1.7 */
+/* Version 1.8 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to PlugDB Software Development 2008-2016 */
+/* (C) Copyright to PlugDB Software Development 2008-2017 */
/* Author: Olivier BERTRAND */
/* */
/* WHAT THIS PROGRAM DOES: */
@@ -70,6 +70,7 @@
#include "tabcol.h"
#include "tabdos.h" // TDBDOS and DOSCOL class dcls
#include "tabtbl.h"
+#include "tabext.h"
#include "tabmysql.h"
#include "ha_connect.h"
@@ -411,9 +412,9 @@ void TDBTBL::ResetDB(void)
colp->COLBLK::Reset();
for (PTABLE tabp = Tablist; tabp; tabp = tabp->GetNext())
- ((PTDBASE)tabp->GetTo_Tdb())->ResetDB();
+ tabp->GetTo_Tdb()->ResetDB();
- Tdbp = (PTDBASE)Tablist->GetTo_Tdb();
+ Tdbp = Tablist->GetTo_Tdb();
Crp = 0;
} // end of ResetDB
@@ -458,7 +459,7 @@ bool TDBTBL::OpenDB(PGLOBAL g)
return TRUE;
if ((CurTable = Tablist)) {
- Tdbp = (PTDBASE)CurTable->GetTo_Tdb();
+ Tdbp = CurTable->GetTo_Tdb();
// Tdbp->SetMode(Mode);
// Tdbp->ResetDB();
// Tdbp->ResetSize();
@@ -515,7 +516,7 @@ int TDBTBL::ReadDB(PGLOBAL g)
/* Continue reading from next table file. */
/***************************************************************/
Tdbp->CloseDB(g);
- Tdbp = (PTDBASE)CurTable->GetTo_Tdb();
+ Tdbp = CurTable->GetTo_Tdb();
// Check and initialize the subtable columns
for (PCOL cp = Columns; cp; cp = cp->GetNext())
@@ -609,13 +610,13 @@ void TDBTBM::ResetDB(void)
// Local tables
for (PTABLE tabp = Tablist; tabp; tabp = tabp->GetNext())
- ((PTDBASE)tabp->GetTo_Tdb())->ResetDB();
+ tabp->GetTo_Tdb()->ResetDB();
// Remote tables
for (PTBMT tp = Tmp; tp; tp = tp->Next)
- ((PTDBASE)tp->Tap->GetTo_Tdb())->ResetDB();
+ tp->Tap->GetTo_Tdb()->ResetDB();
- Tdbp = (Tablist) ? (PTDBASE)Tablist->GetTo_Tdb() : NULL;
+ Tdbp = (Tablist) ? Tablist->GetTo_Tdb() : NULL;
Crp = 0;
} // end of ResetDB
@@ -716,7 +717,7 @@ bool TDBTBM::OpenDB(PGLOBAL g)
/* Proceed with local tables. */
/*********************************************************************/
if ((CurTable = Tablist)) {
- Tdbp = (PTDBASE)CurTable->GetTo_Tdb();
+ Tdbp = CurTable->GetTo_Tdb();
// Tdbp->SetMode(Mode);
// Check and initialize the subtable columns
@@ -808,7 +809,7 @@ int TDBTBM::ReadNextRemote(PGLOBAL g)
} // endif Curtable
- Tdbp = (PTDBASE)Cmp->Tap->GetTo_Tdb();
+ Tdbp = Cmp->Tap->GetTo_Tdb();
// Check and initialize the subtable columns
for (PCOL cp = Columns; cp; cp = cp->GetNext())
diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp
index 4069cdbed2a..762c61bd1a1 100644
--- a/storage/connect/tabutil.cpp
+++ b/storage/connect/tabutil.cpp
@@ -1,7 +1,7 @@
/************* Tabutil cpp Declares Source Code File (.CPP) ************/
-/* Name: TABUTIL.CPP Version 1.1 */
+/* Name: TABUTIL.CPP Version 1.2 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2013 - 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2013 - 2017 */
/* */
/* Utility function used by the PROXY, XCOL, OCCUR, and TBL tables. */
/***********************************************************************/
@@ -45,8 +45,9 @@
#include "myutil.h"
#include "valblk.h"
#include "resource.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabmysql.h"
#include "tabcol.h"
#include "tabutil.h"
@@ -356,7 +357,7 @@ TDBPRX::TDBPRX(PTDBPRX tdbp) : TDBASE(tdbp)
} // end of TDBPRX copy constructor
// Method
-PTDB TDBPRX::CopyOne(PTABS t)
+PTDB TDBPRX::Clone(PTABS t)
{
PTDB tp;
PPRXCOL cp1, cp2;
@@ -370,12 +371,12 @@ PTDB TDBPRX::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Get the PTDB of the sub-table. */
/***********************************************************************/
-PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b)
+PTDB TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b)
{
const char *sp = NULL;
char *db, *name;
@@ -456,13 +457,13 @@ PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b)
if (trace && tdbp)
htrc("Subtable %s in %s\n",
- name, SVP(((PTDBASE)tdbp)->GetDef()->GetDB()));
+ name, SVP(tdbp->GetDef()->GetDB()));
err:
if (s)
free_table_share(s);
- return (PTDBASE)tdbp;
+ return tdbp;
} // end of GetSubTable
/***********************************************************************/
@@ -560,9 +561,9 @@ bool TDBPRX::OpenDB(PGLOBAL g)
/* its column blocks in mode write (required by XML tables). */
/*********************************************************************/
if (Mode == MODE_UPDATE) {
- PTDBASE utp;
+ PTDB utp;
- if (!(utp= (PTDBASE)Tdbp->Duplicate(g))) {
+ if (!(utp= Tdbp->Duplicate(g))) {
sprintf(g->Message, MSG(INV_UPDT_TABLE), Tdbp->GetName());
return true;
} // endif tp
@@ -681,7 +682,7 @@ char *PRXCOL::Decode(PGLOBAL g, const char *cnm)
/* PRXCOL initialization routine. */
/* Look for the matching column in the object table. */
/***********************************************************************/
-bool PRXCOL::Init(PGLOBAL g, PTDBASE tp)
+bool PRXCOL::Init(PGLOBAL g, PTDB tp)
{
if (!tp)
tp = ((PTDBPRX)To_Tdb)->Tdbp;
diff --git a/storage/connect/tabutil.h b/storage/connect/tabutil.h
index b320d169b36..8e56aecff86 100644
--- a/storage/connect/tabutil.h
+++ b/storage/connect/tabutil.h
@@ -67,7 +67,7 @@ class DllExport TDBPRX : public TDBASE {
{return (PTDB)new(g) TDBPRX(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void) {return Tdbp->GetRecpos();}
virtual void ResetDB(void) {Tdbp->ResetDB();}
virtual int RowNumber(PGLOBAL g, bool b = FALSE);
@@ -83,12 +83,12 @@ class DllExport TDBPRX : public TDBASE {
virtual int WriteDB(PGLOBAL g);
virtual int DeleteDB(PGLOBAL g, int irc);
virtual void CloseDB(PGLOBAL g) {if (Tdbp) Tdbp->CloseDB(g);}
- PTDBASE GetSubTable(PGLOBAL g, PTABLE tabp, bool b = false);
+ PTDB GetSubTable(PGLOBAL g, PTABLE tabp, bool b = false);
void RemoveNext(PTABLE tp);
protected:
// Members
- PTDBASE Tdbp; // The object table
+ PTDB Tdbp; // The object table
}; // end of class TDBPRX
/***********************************************************************/
@@ -115,7 +115,7 @@ class DllExport PRXCOL : public COLBLK {
{return false;}
virtual void ReadColumn(PGLOBAL g);
virtual void WriteColumn(PGLOBAL g);
- virtual bool Init(PGLOBAL g, PTDBASE tp);
+ virtual bool Init(PGLOBAL g, PTDB tp);
protected:
char *Decode(PGLOBAL g, const char *cnm);
diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp
index e788529075f..282fb55a43c 100644
--- a/storage/connect/tabvct.cpp
+++ b/storage/connect/tabvct.cpp
@@ -241,7 +241,7 @@ PTDB VCTDEF::GetTable(PGLOBAL g, MODE mode)
/*********************************************************************/
if (mode != MODE_INSERT)
if (tdbp->GetBlockValues(g))
- PushWarning(g, (PTDBASE)tdbp);
+ PushWarning(g, tdbp);
// return NULL; // causes a crash when deleting index
return tdbp;
@@ -263,7 +263,7 @@ TDBVCT::TDBVCT(PGLOBAL g, PTDBVCT tdbp) : TDBFIX(g, tdbp)
} // end of TDBVCT copy constructor
// Method
-PTDB TDBVCT::CopyOne(PTABS t)
+PTDB TDBVCT::Clone(PTABS t)
{
PTDB tp;
PVCTCOL cp1, cp2;
@@ -277,7 +277,7 @@ PTDB TDBVCT::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate VCT column description block. */
diff --git a/storage/connect/tabvct.h b/storage/connect/tabvct.h
index 8ad3c8e21be..189a9ae2221 100644
--- a/storage/connect/tabvct.h
+++ b/storage/connect/tabvct.h
@@ -68,7 +68,7 @@ class DllExport TDBVCT : public TDBFIX {
bool IsSplit(void) {return ((VCTDEF*)To_Def)->Split;}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual bool IsUsingTemp(PGLOBAL g);
// Database routines
diff --git a/storage/connect/tabvir.cpp b/storage/connect/tabvir.cpp
index 356fc981357..155c71fe268 100644
--- a/storage/connect/tabvir.cpp
+++ b/storage/connect/tabvir.cpp
@@ -20,7 +20,7 @@
#include "plgdbsem.h"
#include "filter.h"
#include "xtable.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "colblk.h"
#include "mycat.h" // for FNC_COL
#include "tabvir.h"
diff --git a/storage/connect/tabwmi.cpp b/storage/connect/tabwmi.cpp
index 98a44b9d635..4871a1d66dc 100644
--- a/storage/connect/tabwmi.cpp
+++ b/storage/connect/tabwmi.cpp
@@ -1,5 +1,5 @@
/***********************************************************************/
-/* TABWMI: Author Olivier Bertrand -- PlugDB -- 2012 - 2013 */
+/* TABWMI: Author Olivier Bertrand -- PlugDB -- 2012 - 2017 */
/* TABWMI: Virtual table to get WMI information. */
/***********************************************************************/
#if !defined(__WIN__)
@@ -11,8 +11,9 @@
#include "global.h"
#include "plgdbsem.h"
#include "mycat.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "xtable.h"
+#include "tabext.h"
#include "colblk.h"
//#include "filter.h"
//#include "xindex.h"
@@ -62,7 +63,7 @@ PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname)
if (FAILED(res)) {
sprintf(g->Message, "Failed to initialize COM library. "
- "Error code = %p", res);
+ "Error code = %x", res);
return NULL;
} // endif res
@@ -85,7 +86,7 @@ PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname)
(void**) &loc);
if (FAILED(res)) {
sprintf(g->Message, "Failed to create Locator. "
- "Error code = %p", res);
+ "Error code = %x", res);
CoUninitialize();
return NULL;
} // endif res
@@ -94,7 +95,7 @@ PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname)
NULL, NULL, NULL, 0, NULL, NULL, &wp->Svc);
if (FAILED(res)) {
- sprintf(g->Message, "Could not connect. Error code = %p", res);
+ sprintf(g->Message, "Could not connect. Error code = %x", res);
loc->Release();
CoUninitialize();
return NULL;
@@ -423,7 +424,7 @@ bool TDBWMI::Initialize(PGLOBAL g)
if (FAILED(Res)) {
sprintf(g->Message, "Failed to initialize COM library. "
- "Error code = %p", Res);
+ "Error code = %x", Res);
return true; // Program has failed.
} // endif Res
@@ -436,7 +437,7 @@ bool TDBWMI::Initialize(PGLOBAL g)
if (FAILED(Res)) {
sprintf(g->Message, "Failed to create Locator. "
- "Error code = %p", Res);
+ "Error code = %x", Res);
CoUninitialize();
return true; // Program has failed.
} // endif Res
@@ -448,7 +449,7 @@ bool TDBWMI::Initialize(PGLOBAL g)
NULL, NULL,0, NULL, 0, 0, &Svc);
if (FAILED(Res)) {
- sprintf(g->Message, "Could not connect. Error code = %p", Res);
+ sprintf(g->Message, "Could not connect. Error code = %x", Res);
loc->Release();
CoUninitialize();
return true; // Program has failed.
@@ -463,7 +464,7 @@ bool TDBWMI::Initialize(PGLOBAL g)
RPC_C_IMP_LEVEL_IMPERSONATE, NULL, EOAC_NONE);
if (FAILED(Res)) {
- sprintf(g->Message, "Could not set proxy. Error code = 0x", Res);
+ sprintf(g->Message, "Could not set proxy. Error code = %x", Res);
Svc->Release();
CoUninitialize();
return true; // Program has failed.
@@ -573,7 +574,7 @@ bool TDBWMI::GetWMIInfo(PGLOBAL g)
NULL, &Enumerator);
if (FAILED(Rc)) {
- sprintf(g->Message, "Query %s failed. Error code = %p", cmd, Rc);
+ sprintf(g->Message, "Query %s failed. Error code = %x", cmd, Rc);
Svc->Release();
CoUninitialize();
return true; // Program has failed.
diff --git a/storage/connect/tabxcl.cpp b/storage/connect/tabxcl.cpp
index add61431493..93a24accc3c 100644
--- a/storage/connect/tabxcl.cpp
+++ b/storage/connect/tabxcl.cpp
@@ -1,7 +1,7 @@
/************* TabXcl CPP Declares Source Code File (.CPP) *************/
/* Name: TABXCL.CPP Version 1.0 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2013 */
+/* (C) Copyright to the author Olivier BERTRAND 2013-2017 */
/* */
/* XCOL: Table having one column containing several values */
/* comma separated. When creating the table, the name of the X */
@@ -45,12 +45,12 @@
#include "plgdbsem.h"
#include "plgcnx.h" // For DB types
#include "resource.h"
-#include "reldef.h"
+#include "xtable.h"
+#include "tabext.h"
#include "filamtxt.h"
#include "tabdos.h"
#include "tabcol.h"
#include "tabxcl.h"
-#include "xtable.h"
#include "tabmysql.h"
#include "ha_connect.h"
@@ -246,7 +246,7 @@ XCLCOL::XCLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i)
/* XCLCOL initialization routine. */
/* Allocate Cbuf that will contain the Colp value. */
/***********************************************************************/
-bool XCLCOL::Init(PGLOBAL g, PTDBASE tp)
+bool XCLCOL::Init(PGLOBAL g, PTDB tp)
{
if (PRXCOL::Init(g, tp))
return true;
diff --git a/storage/connect/tabxcl.h b/storage/connect/tabxcl.h
index 291f0b4263a..fde000ee709 100644
--- a/storage/connect/tabxcl.h
+++ b/storage/connect/tabxcl.h
@@ -91,7 +91,7 @@ class XCLCOL : public PRXCOL {
using PRXCOL::Init;
virtual void Reset(void) {} // Evaluated only by TDBXCL
virtual void ReadColumn(PGLOBAL g);
- virtual bool Init(PGLOBAL g, PTDBASE tp = NULL);
+ virtual bool Init(PGLOBAL g, PTDB tp = NULL);
protected:
// Default constructor not to be used
diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp
index 3b8229fcf51..52cf3d3812f 100644
--- a/storage/connect/tabxml.cpp
+++ b/storage/connect/tabxml.cpp
@@ -42,7 +42,7 @@
/***********************************************************************/
#include "global.h"
#include "plgdbsem.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "xtable.h"
#include "colblk.h"
#include "mycat.h"
@@ -537,6 +537,11 @@ PTDB XMLDEF::GetTable(PGLOBAL g, MODE m)
if (Catfunc == FNC_COL)
return new(g) TDBXCT(this);
+ if (Zipped && !(m == MODE_READ || m == MODE_ANY)) {
+ strcpy(g->Message, "Zipped XML tables are read only");
+ return NULL;
+ } // endif Zipped
+
PTDBASE tdbp = new(g) TDBXML(this);
if (Multiple)
@@ -655,7 +660,7 @@ TDBXML::TDBXML(PTDBXML tdbp) : TDBASE(tdbp)
} // end of TDBXML copy constructor
// Used for update
-PTDB TDBXML::CopyOne(PTABS t)
+PTDB TDBXML::Clone(PTABS t)
{
PTDB tp;
PXMLCOL cp1, cp2;
@@ -669,7 +674,7 @@ PTDB TDBXML::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate XML column description block. */
@@ -926,7 +931,7 @@ bool TDBXML::Initialize(PGLOBAL g)
if (rc)
sprintf(g->Message, "%s: %s", MSG(COM_ERROR), buf);
else
- sprintf(g->Message, "%s hr=%p", MSG(COM_ERROR), e.Error());
+ sprintf(g->Message, "%s hr=%x", MSG(COM_ERROR), e.Error());
goto error;
#endif // __WIN__
diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h
index 6c586d79dec..65b353072cb 100644
--- a/storage/connect/tabxml.h
+++ b/storage/connect/tabxml.h
@@ -71,7 +71,7 @@ class DllExport TDBXML : public TDBASE {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBXML(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void);
virtual int GetProgCur(void) {return N;}
virtual PSZ GetFile(PGLOBAL g) {return Xfile;}
diff --git a/storage/connect/tabzip.cpp b/storage/connect/tabzip.cpp
index 11f414ee154..b91059a3843 100644
--- a/storage/connect/tabzip.cpp
+++ b/storage/connect/tabzip.cpp
@@ -70,8 +70,12 @@ PCOL TDBZIP::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
/* param: filename path and the filename of the zip file to open. */
/* return: true if open, false otherwise. */
/***********************************************************************/
-bool TDBZIP::open(PGLOBAL g, const char *filename)
+bool TDBZIP::open(PGLOBAL g, const char *fn)
{
+ char filename[_MAX_PATH];
+
+ PlugSetPath(filename, fn, GetPath());
+
if (!zipfile && !(zipfile = unzOpen64(filename)))
sprintf(g->Message, "Zipfile open error");
@@ -102,7 +106,7 @@ int TDBZIP::Cardinality(PGLOBAL g)
unz_global_info64 ginfo;
int err = unzGetGlobalInfo64(zipfile, &ginfo);
- Cardinal = (err == UNZ_OK) ? ginfo.number_entry : 0;
+ Cardinal = (err == UNZ_OK) ? (int)ginfo.number_entry : 0;
} else
Cardinal = 0;
@@ -221,6 +225,14 @@ void ZIPCOL::ReadColumn(PGLOBAL g)
case 3:
Value->SetValue((int)Tdbz->finfo.compression_method);
break;
+ case 4:
+ Tdbz->finfo.tmu_date.tm_year -= 1900;
+
+ if (((DTVAL*)Value)->MakeTime((tm*)&Tdbz->finfo.tmu_date))
+ Value->SetNull(true);
+
+ Tdbz->finfo.tmu_date.tm_year += 1900;
+ break;
default:
Value->SetValue_psz((PSZ)Tdbz->fn);
} // endswitch flag
diff --git a/storage/connect/tabzip.h b/storage/connect/tabzip.h
index 6f1735258e7..dcec3475371 100644
--- a/storage/connect/tabzip.h
+++ b/storage/connect/tabzip.h
@@ -20,7 +20,7 @@ typedef class ZIPCOL *PZIPCOL;
/***********************************************************************/
class DllExport ZIPDEF : public DOSDEF { /* Table description */
friend class TDBZIP;
- friend class ZIPFAM;
+ friend class UNZFAM;
public:
// Constructor
ZIPDEF(void) {}
diff --git a/storage/connect/value.h b/storage/connect/value.h
index a670ade4c28..14a568c3549 100644
--- a/storage/connect/value.h
+++ b/storage/connect/value.h
@@ -271,7 +271,7 @@ class DllExport TYPVAL<PSZ>: public VALUE {
virtual void Reset(void) {*Strp = 0;}
virtual int GetValLen(void) {return Len;};
virtual int GetValPrec() {return (Ci) ? 1 : 0;}
- virtual int GetSize(void) {return (Strp) ? strlen(Strp) : 0;}
+ virtual int GetSize(void) {return (Strp) ? (int)strlen(Strp) : 0;}
virtual PSZ GetCharValue(void) {return Strp;}
virtual char GetTinyValue(void);
virtual uchar GetUTinyValue(void);
diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp
index a2cf4e77b80..15fb71ab88a 100755
--- a/storage/connect/xindex.cpp
+++ b/storage/connect/xindex.cpp
@@ -81,7 +81,7 @@ int PlgMakeIndex(PGLOBAL g, PSZ name, PIXDEF pxdf, bool add)
{
int rc;
PTABLE tablep;
- PTDBASE tdbp;
+ PTDB tdbp;
PCATLG cat = PlgGetCatalog(g, true);
/*********************************************************************/
@@ -89,12 +89,12 @@ int PlgMakeIndex(PGLOBAL g, PSZ name, PIXDEF pxdf, bool add)
/*********************************************************************/
tablep = new(g) XTAB(name);
- if (!(tdbp = (PTDBASE)cat->GetTable(g, tablep)))
+ if (!(tdbp = cat->GetTable(g, tablep)))
rc = RC_NF;
else if (!tdbp->GetDef()->Indexable()) {
sprintf(g->Message, MSG(TABLE_NO_INDEX), name);
rc = RC_NF;
- } else if ((rc = tdbp->MakeIndex(g, pxdf, add)) == RC_INFO)
+ } else if ((rc = ((PTDBASE)tdbp)->MakeIndex(g, pxdf, add)) == RC_INFO)
rc = RC_OK; // No or remote index
return rc;
@@ -2738,7 +2738,7 @@ bool XHUGE::Read(PGLOBAL g, void *buf, int n, int size)
} // endif nbr
} else {
- char *buf[256];
+ char buf[256];
DWORD drc = GetLastError();
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
diff --git a/storage/connect/xindex.h b/storage/connect/xindex.h
index ef2e934e5ee..2d10d72722e 100644
--- a/storage/connect/xindex.h
+++ b/storage/connect/xindex.h
@@ -184,7 +184,7 @@ class DllExport XXBASE : public CSORT, public BLOCK {
virtual bool IsRandom(void) {return true;}
virtual bool IsDynamic(void) {return Dynamic;}
virtual void SetDynamic(bool dyn) {Dynamic = dyn;}
- virtual bool HaveSame(void) {return false;}
+//virtual bool HaveSame(void) {return false;}
virtual int GetCurPos(void) {return Cur_K;}
virtual void SetNval(int n) {assert(n == 1);}
virtual void SetOp(OPVAL op) {Op = op;}
@@ -256,7 +256,7 @@ class DllExport XINDEX : public XXBASE {
// Implementation
virtual IDT GetType(void) {return TYPE_IDX_INDX;}
virtual bool IsMul(void) {return (Nval < Nk) ? true : Mul;}
- virtual bool HaveSame(void) {return Op == OP_SAME;}
+//virtual bool HaveSame(void) {return Op == OP_SAME;}
virtual int GetCurPos(void) {return (Pex) ? Pex[Cur_K] : Cur_K;}
virtual void SetNval(int n) {Nval = n;}
int GetMaxSame(void) {return MaxSame;}
diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h
index d78cd09f9a4..8f6c23c4aeb 100644
--- a/storage/connect/xobject.h
+++ b/storage/connect/xobject.h
@@ -127,7 +127,8 @@ class DllExport STRING : public BLOCK {
// Implementation
inline int GetLength(void) {return (int)Length;}
- inline PSZ GetStr(void) {return Strp;}
+ inline void SetLength(uint n) {Length = n;}
+ inline PSZ GetStr(void) {return Strp;}
inline uint32 GetSize(void) {return Size;}
// Methods
diff --git a/storage/connect/xtable.h b/storage/connect/xtable.h
index e18a08a54b8..4aeea05946a 100644
--- a/storage/connect/xtable.h
+++ b/storage/connect/xtable.h
@@ -1,7 +1,7 @@
/**************** Table H Declares Source Code File (.H) ***************/
-/* Name: TABLE.H Version 2.3 */
+/* Name: TABLE.H Version 2.4 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1999-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 1999-2017 */
/* */
/* This file contains the TBX, OPJOIN and TDB class definitions. */
/***********************************************************************/
@@ -17,6 +17,7 @@
#include "block.h"
#include "colblk.h"
#include "m_ctype.h"
+#include "reldef.h"
typedef class CMD *PCMD;
typedef struct st_key_range key_range;
@@ -32,24 +33,30 @@ class CMD : public BLOCK {
char *Cmd;
}; // end of class CMD
+#if 0
// Condition filter structure
class CONDFIL : public BLOCK {
public:
// Constructor
CONDFIL(const Item *cond, uint idx, AMT type)
{
- Cond = cond; Idx = idx; Type = type; Body = NULL; Op = OP_XX; Cmds = NULL;
+ Cond = cond; Idx = idx; Type = type; Op = OP_XX;
+ Cmds = NULL; All = true; Body = NULL; Having = NULL;
}
// Members
const Item *Cond;
AMT Type;
uint Idx;
- char *Body;
OPVAL Op;
PCMD Cmds;
+ bool All;
+ char *Body;
+ char *Having;
}; // end of class CONDFIL
+#endif // 0
+typedef class EXTCOL *PEXTCOL;
typedef class CONDFIL *PCFIL;
typedef class TDBCAT *PTDBCAT;
typedef class CATCOL *PCATCOL;
@@ -64,47 +71,61 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
TDB(PTDB tdbp);
// Implementation
- static void SetTnum(int n) {Tnum = n;}
- inline PTDB GetOrig(void) {return To_Orig;}
- inline TUSE GetUse(void) {return Use;}
- inline PCFIL GetCondFil(void) {return To_CondFil;}
- inline LPCSTR GetName(void) {return Name;}
- inline PTABLE GetTable(void) {return To_Table;}
- inline PCOL GetColumns(void) {return Columns;}
- inline int GetDegree(void) {return Degree;}
- inline MODE GetMode(void) {return Mode;}
- inline PFIL GetFilter(void) {return To_Filter;}
- inline void SetFilter(PFIL fp) {To_Filter = fp;}
- inline void SetOrig(PTDB txp) {To_Orig = txp;}
- inline void SetUse(TUSE n) {Use = n;}
- inline void SetCondFil(PCFIL cfp) {To_CondFil = cfp;}
- inline void SetNext(PTDB tdbp) {Next = tdbp;}
- inline void SetName(LPCSTR name) {Name = name;}
- inline void SetTable(PTABLE tablep) {To_Table = tablep;}
- inline void SetColumns(PCOL colp) {Columns = colp;}
- inline void SetDegree(int degree) {Degree = degree;}
- inline void SetMode(MODE mode) {Mode = mode;}
+ static void SetTnum(int n) {Tnum = n;}
+ inline PTABDEF GetDef(void) {return To_Def;}
+ inline PTDB GetOrig(void) {return To_Orig;}
+ inline TUSE GetUse(void) {return Use;}
+ inline PCFIL GetCondFil(void) {return To_CondFil;}
+ inline LPCSTR GetName(void) {return Name;}
+ inline PTABLE GetTable(void) {return To_Table;}
+ inline PCOL GetColumns(void) {return Columns;}
+ inline int GetDegree(void) {return Degree;}
+ inline MODE GetMode(void) {return Mode;}
+ inline PFIL GetFilter(void) {return To_Filter;}
+ inline PCOL GetSetCols(void) {return To_SetCols;}
+ inline void SetSetCols(PCOL colp) {To_SetCols = colp;}
+ inline void SetFilter(PFIL fp) {To_Filter = fp;}
+ inline void SetOrig(PTDB txp) {To_Orig = txp;}
+ inline void SetUse(TUSE n) {Use = n;}
+ inline void SetCondFil(PCFIL cfp) {To_CondFil = cfp;}
+ inline void SetNext(PTDB tdbp) {Next = tdbp;}
+ inline void SetName(LPCSTR name) {Name = name;}
+ inline void SetTable(PTABLE tablep) {To_Table = tablep;}
+ inline void SetColumns(PCOL colp) {Columns = colp;}
+ inline void SetDegree(int degree) {Degree = degree;}
+ inline void SetMode(MODE mode) {Mode = mode;}
// Properties
- virtual AMT GetAmType(void) {return TYPE_AM_ERROR;}
- virtual int GetTdb_No(void) {return Tdb_No;}
- virtual PTDB GetNext(void) {return Next;}
- virtual PCATLG GetCat(void) {return NULL;}
- virtual void SetAbort(bool) {;}
+ virtual AMT GetAmType(void) {return TYPE_AM_ERROR;}
+ virtual bool IsRemote(void) {return false;}
+ virtual bool IsIndexed(void) {return false;}
+ virtual int GetTdb_No(void) {return Tdb_No;}
+ virtual PTDB GetNext(void) {return Next;}
+ virtual PCATLG GetCat(void) {return NULL;}
+ virtual void SetAbort(bool) {;}
+ virtual PKXBASE GetKindex(void) {return NULL;}
// Methods
virtual bool IsSame(PTDB tp) {return tp == this;}
- virtual bool IsSpecial(PSZ name) = 0;
- virtual bool GetBlockValues(PGLOBAL) {return false;}
+ virtual bool IsSpecial(PSZ name);
+ virtual bool IsReadOnly(void) {return Read_Only;}
+ virtual bool IsView(void) {return FALSE;}
+ virtual PSZ GetPath(void);
+ virtual RECFM GetFtype(void) {return RECFM_NAF;}
+ virtual bool GetBlockValues(PGLOBAL) { return false; }
virtual int Cardinality(PGLOBAL) {return 0;}
- virtual int GetMaxSize(PGLOBAL) = 0;
+ virtual int GetRecpos(void) = 0;
+ virtual bool SetRecpos(PGLOBAL g, int recpos);
+ virtual int GetMaxSize(PGLOBAL) = 0;
virtual int GetProgMax(PGLOBAL) = 0;
- virtual int GetProgCur(void) = 0;
- virtual int RowNumber(PGLOBAL g, bool b = false);
- virtual bool IsReadOnly(void) {return true;}
- virtual const CHARSET_INFO *data_charset() {return NULL;}
+ virtual int GetProgCur(void) {return GetRecpos();}
+ virtual PSZ GetFile(PGLOBAL) {return "Not a file";}
+ virtual void SetFile(PGLOBAL, PSZ) {}
+ virtual void ResetDB(void) {}
+ virtual void ResetSize(void) {MaxSize = -1;}
+ virtual int RowNumber(PGLOBAL g, bool b = false);
virtual PTDB Duplicate(PGLOBAL) {return NULL;}
- virtual PTDB CopyOne(PTABS) {return this;}
+ virtual PTDB Clone(PTABS) {return this;}
virtual PTDB Copy(PTABS t);
virtual void PrintAM(FILE *f, char *m)
{fprintf(f, "%s AM(%d)\n", m, GetAmType());}
@@ -112,10 +133,15 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
virtual void Print(PGLOBAL g, char *ps, uint z);
virtual PSZ GetServer(void) = 0;
virtual int GetBadLines(void) {return 0;}
+ virtual CHARSET_INFO *data_charset(void);
- // Database pure virtual routines
- virtual PCOL ColDB(PGLOBAL g, PSZ name, int num) = 0;
- virtual void MarkDB(PGLOBAL, PTDB) = 0;
+ // Database routines
+ virtual PCOL ColDB(PGLOBAL g, PSZ name, int num);
+ virtual PCOL MakeCol(PGLOBAL, PCOLDEF, PCOL, int)
+ {assert(false); return NULL;}
+ virtual PCOL InsertSpecialColumn(PCOL colp);
+ virtual PCOL InsertSpcBlk(PGLOBAL g, PCOLDEF cdp);
+ virtual void MarkDB(PGLOBAL g, PTDB tdb2);
virtual bool OpenDB(PGLOBAL) = 0;
virtual int ReadDB(PGLOBAL) = 0;
virtual int WriteDB(PGLOBAL) = 0;
@@ -126,20 +152,26 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
protected:
// Members
- PTDB To_Orig; // Pointer to original if it is a copy
- TUSE Use;
- PFIL To_Filter;
- PCFIL To_CondFil; // To condition filter structure
- static int Tnum; // Used to generate Tdb_no's
- const int Tdb_No; // GetTdb_No() is always 0 for OPJOIN
- PTDB Next; // Next in linearized queries
- PTABLE To_Table; // Points to the XTAB object
- LPCSTR Name; // Table name
- PCOL Columns; // Points to the first column of the table
- MODE Mode; // 10 Read, 30 Update, 40 Insert, 50 Delete
- int Degree; // Number of columns
- int Cardinal; // Table number of rows
- }; // end of class TDB
+ PTDB To_Orig; // Pointer to original if it is a copy
+ PTABDEF To_Def; // Points to catalog description block
+ TUSE Use;
+ PFIL To_Filter;
+ PCFIL To_CondFil; // To condition filter structure
+ static int Tnum; // Used to generate Tdb_no's
+ const int Tdb_No; // GetTdb_No() is always 0 for OPJOIN
+ PTDB Next; // Next in linearized queries
+ PTABLE To_Table; // Points to the XTAB object
+ LPCSTR Name; // Table name
+ PCOL Columns; // Points to the first column of the table
+ PCOL To_SetCols; // Points to updated columns
+ MODE Mode; // 10 Read, 30 Update, 40 Insert, 50 Delete
+ int Degree; // Number of columns
+ int Cardinal; // Table number of rows
+ int MaxSize; // Max size in number of lines
+ bool Read_Only; // True for read only tables
+ const CHARSET_INFO *m_data_charset;
+ const char *csname; // Table charset name
+}; // end of class TDB
/***********************************************************************/
/* This is the base class for all query tables (except decode). */
@@ -155,50 +187,50 @@ class DllExport TDBASE : public TDB {
// Implementation
inline int GetKnum(void) {return Knum;}
- inline PTABDEF GetDef(void) {return To_Def;}
- inline PKXBASE GetKindex(void) {return To_Kindex;}
- inline PCOL GetSetCols(void) {return To_SetCols;}
- inline void SetSetCols(PCOL colp) {To_SetCols = colp;}
+//inline PTABDEF GetDef(void) {return To_Def;}
+//inline PCOL GetSetCols(void) {return To_SetCols;}
+//inline void SetSetCols(PCOL colp) {To_SetCols = colp;}
inline void SetKey_Col(PCOL *cpp) {To_Key_Col = cpp;}
inline void SetXdp(PIXDEF xdp) {To_Xdp = xdp;}
inline void SetKindex(PKXBASE kxp) {To_Kindex = kxp;}
// Properties
- void ResetKindex(PGLOBAL g, PKXBASE kxp);
+ virtual PKXBASE GetKindex(void) {return To_Kindex;}
+ void ResetKindex(PGLOBAL g, PKXBASE kxp);
PCOL Key(int i) {return (To_Key_Col) ? To_Key_Col[i] : NULL;}
// Methods
virtual bool IsUsingTemp(PGLOBAL) {return false;}
- virtual bool IsIndexed(void) {return false;}
- virtual bool IsSpecial(PSZ name);
+//virtual bool IsIndexed(void) {return false;}
+//virtual bool IsSpecial(PSZ name);
virtual PCATLG GetCat(void);
- virtual PSZ GetPath(void);
+//virtual PSZ GetPath(void);
virtual void PrintAM(FILE *f, char *m);
- virtual RECFM GetFtype(void) {return RECFM_NAF;}
+//virtual RECFM GetFtype(void) {return RECFM_NAF;}
//virtual int GetAffectedRows(void) {return -1;}
- virtual int GetRecpos(void) = 0;
- virtual bool SetRecpos(PGLOBAL g, int recpos);
- virtual bool IsReadOnly(void) {return Read_Only;}
- virtual bool IsView(void) {return FALSE;}
- virtual CHARSET_INFO *data_charset(void);
+//virtual int GetRecpos(void) = 0;
+//virtual bool SetRecpos(PGLOBAL g, int recpos);
+//virtual bool IsReadOnly(void) {return Read_Only;}
+//virtual bool IsView(void) {return FALSE;}
+//virtual CHARSET_INFO *data_charset(void);
virtual int GetProgMax(PGLOBAL g) {return GetMaxSize(g);}
- virtual int GetProgCur(void) {return GetRecpos();}
- virtual PSZ GetFile(PGLOBAL) {return "Not a file";}
- virtual int GetRemote(void) {return 0;}
- virtual void SetFile(PGLOBAL, PSZ) {}
- virtual void ResetDB(void) {}
- virtual void ResetSize(void) {MaxSize = -1;}
+//virtual int GetProgCur(void) {return GetRecpos();}
+//virtual PSZ GetFile(PGLOBAL) {return "Not a file";}
+//virtual int GetRemote(void) {return 0;}
+//virtual void SetFile(PGLOBAL, PSZ) {}
+//virtual void ResetDB(void) {}
+//virtual void ResetSize(void) {MaxSize = -1;}
virtual void RestoreNrec(void) {}
virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox);
virtual PSZ GetServer(void) {return "Current";}
// Database routines
- virtual PCOL ColDB(PGLOBAL g, PSZ name, int num);
- virtual PCOL MakeCol(PGLOBAL, PCOLDEF, PCOL, int)
- {assert(false); return NULL;}
- virtual PCOL InsertSpecialColumn(PCOL colp);
- virtual PCOL InsertSpcBlk(PGLOBAL g, PCOLDEF cdp);
- virtual void MarkDB(PGLOBAL g, PTDB tdb2);
+//virtual PCOL ColDB(PGLOBAL g, PSZ name, int num);
+//virtual PCOL MakeCol(PGLOBAL, PCOLDEF, PCOL, int)
+// {assert(false); return NULL;}
+//virtual PCOL InsertSpecialColumn(PCOL colp);
+//virtual PCOL InsertSpcBlk(PGLOBAL g, PCOLDEF cdp);
+//virtual void MarkDB(PGLOBAL g, PTDB tdb2);
virtual int MakeIndex(PGLOBAL g, PIXDEF, bool)
{strcpy(g->Message, "Remote index"); return RC_INFO;}
virtual bool ReadKey(PGLOBAL, OPVAL, const key_range *)
@@ -209,19 +241,19 @@ class DllExport TDBASE : public TDB {
"This function should not be called for this table"); return true;}
// Members
- PTABDEF To_Def; // Points to catalog description block
+//PTABDEF To_Def; // Points to catalog description block
PXOB *To_Link; // Points to column of previous relations
PCOL *To_Key_Col; // Points to key columns in current file
PKXBASE To_Kindex; // Points to table key index
PIXDEF To_Xdp; // To the index definition block
- PCOL To_SetCols; // Points to updated columns
+//PCOL To_SetCols; // Points to updated columns
RECFM Ftype; // File type: 0-var 1-fixed 2-binary (VCT)
- int MaxSize; // Max size in number of lines
+//int MaxSize; // Max size in number of lines
int Knum; // Size of key arrays
- bool Read_Only; // True for read only tables
- const CHARSET_INFO *m_data_charset;
- const char *csname; // Table charset name
- }; // end of class TDBASE
+//bool Read_Only; // True for read only tables
+//const CHARSET_INFO *m_data_charset;
+//const char *csname; // Table charset name
+}; // end of class TDBASE
/***********************************************************************/
/* The abstract base class declaration for the catalog tables. */
@@ -243,7 +275,8 @@ class DllExport TDBCAT : public TDBASE {
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
- virtual int GetMaxSize(PGLOBAL g);
+ virtual int Cardinality(PGLOBAL) {return 10;} // To avoid assert
+ virtual int GetMaxSize(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -275,7 +308,7 @@ class DllExport CATCOL : public COLBLK {
virtual int GetAmType(void) {return TYPE_AM_ODBC;}
// Methods
- virtual void ReadColumn(PGLOBAL g);
+ virtual void ReadColumn(PGLOBAL g);
protected:
CATCOL(void) {} // Default constructor not to be used
diff --git a/storage/connect/zip.c b/storage/connect/zip.c
index ea54853e858..4bbe31ab7dd 100644
--- a/storage/connect/zip.c
+++ b/storage/connect/zip.c
@@ -637,7 +637,7 @@ local ZPOS64_T zip64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib
return relativeOffset;
}
-int LoadCentralDirectoryRecord(zip64_internal* pziinit)
+static int LoadCentralDirectoryRecord(zip64_internal* pziinit)
{
int err=ZIP_OK;
ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/
@@ -846,7 +846,7 @@ int LoadCentralDirectoryRecord(zip64_internal* pziinit)
/************************************************************/
-extern zipFile ZEXPORT zipOpen3 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_32_def* pzlib_filefunc64_32_def)
+static zipFile zipOpen3 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_32_def* pzlib_filefunc64_32_def)
{
zip64_internal ziinit;
zip64_internal* zi;
@@ -955,7 +955,7 @@ extern zipFile ZEXPORT zipOpen64 (const void* pathname, int append)
return zipOpen3(pathname,append,NULL,NULL);
}
-int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt size_extrafield_local, const void* extrafield_local)
+static int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt size_extrafield_local, const void* extrafield_local)
{
/* write the local header */
int err;
@@ -1752,7 +1752,7 @@ extern int ZEXPORT zipCloseFileInZip (zipFile file)
return zipCloseFileInZipRaw (file,0,0);
}
-int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eocd_pos_inzip)
+static int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eocd_pos_inzip)
{
int err = ZIP_OK;
ZPOS64_T pos = zip64eocd_pos_inzip - zi->add_position_when_writting_offset;
@@ -1774,7 +1774,7 @@ int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eo
return err;
}
-int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip)
+static int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip)
{
int err = ZIP_OK;
@@ -1813,7 +1813,7 @@ int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centra
}
return err;
}
-int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip)
+static int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip)
{
int err = ZIP_OK;
@@ -1861,7 +1861,7 @@ int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir,
return err;
}
-int Write_GlobalComment(zip64_internal* zi, const char* global_comment)
+static int Write_GlobalComment(zip64_internal* zi, const char* global_comment)
{
int err = ZIP_OK;
uInt size_global_comment = 0;
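
The zip.c hunks only change linkage: helpers such as LoadCentralDirectoryRecord and Write_GlobalComment become static, so they are no longer exported and cannot collide with identically named symbols elsewhere in the server. A rough sketch of the pattern, with invented function names:

#include <cstdio>

// File-local helper: internal linkage, invisible to other translation units,
// so another library may define its own write_header() without a clash.
static int write_header(std::FILE* f) {
    return std::fputs("HDR\n", f) >= 0 ? 0 : -1;
}

// Only this entry point is visible outside the file.
int write_archive(const char* path) {
    std::FILE* f = std::fopen(path, "wb");
    if (!f) return -1;
    int err = write_header(f);
    std::fclose(f);
    return err;
}

int main() { return write_archive("/tmp/demo_archive.bin"); }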
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index fca433f9648..4f24ad75e26 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -1880,7 +1880,6 @@ btr_cur_update_alloc_zip_func(
const page_t* page = page_cur_get_page(cursor);
ut_ad(page_zip == page_cur_get_page_zip(cursor));
- ut_ad(page_zip);
ut_ad(!dict_index_is_ibuf(index));
ut_ad(rec_offs_validate(page_cur_get_rec(cursor), index, offsets));
@@ -2962,7 +2961,7 @@ btr_cur_del_mark_set_clust_rec(
ut_ad(page_is_leaf(page_align(rec)));
#ifdef UNIV_DEBUG
- if (btr_cur_print_record_ops && (thr != NULL)) {
+ if (btr_cur_print_record_ops) {
btr_cur_trx_report(thr_get_trx(thr)->id, index, "del mark ");
rec_print_new(stderr, rec, offsets);
}
@@ -4278,7 +4277,6 @@ btr_cur_disown_inherited_fields(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!rec_offs_comp(offsets) || !rec_get_node_ptr_flag(rec));
ut_ad(rec_offs_any_extern(offsets));
- ut_ad(mtr);
for (i = 0; i < rec_offs_n_fields(offsets); i++) {
if (rec_offs_nth_extern(offsets, i)
@@ -4341,9 +4339,6 @@ btr_push_update_extern_fields(
ulint n;
const upd_field_t* uf;
- ut_ad(tuple);
- ut_ad(update);
-
uf = update->fields;
n = upd_get_n_fields(update);
@@ -4515,7 +4510,6 @@ btr_store_big_rec_extern_fields(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_any_extern(offsets));
- ut_ad(btr_mtr);
ut_ad(mtr_memo_contains(btr_mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(btr_mtr, rec_block, MTR_MEMO_PAGE_X_FIX));
diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc
index d08bde763b3..d5efd87f9e1 100644
--- a/storage/innobase/buf/buf0dump.cc
+++ b/storage/innobase/buf/buf0dump.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -52,8 +53,8 @@ enum status_severity {
/* Flags that tell the buffer pool dump/load thread which action should it
take after being waked up. */
-static ibool buf_dump_should_start = FALSE;
-static ibool buf_load_should_start = FALSE;
+static volatile bool buf_dump_should_start;
+static volatile bool buf_load_should_start;
static ibool buf_load_abort_flag = FALSE;
@@ -78,7 +79,7 @@ void
buf_dump_start()
/*============*/
{
- buf_dump_should_start = TRUE;
+ buf_dump_should_start = true;
os_event_set(srv_buf_dump_event);
}
@@ -92,7 +93,7 @@ void
buf_load_start()
/*============*/
{
- buf_load_should_start = TRUE;
+ buf_load_should_start = true;
os_event_set(srv_buf_dump_event);
}
@@ -695,15 +696,18 @@ DECLARE_THREAD(buf_dump_thread)(
os_event_wait(srv_buf_dump_event);
if (buf_dump_should_start) {
- buf_dump_should_start = FALSE;
+ buf_dump_should_start = false;
buf_dump(TRUE /* quit on shutdown */);
}
if (buf_load_should_start) {
- buf_load_should_start = FALSE;
+ buf_load_should_start = false;
buf_load();
}
+ if (buf_dump_should_start || buf_load_should_start) {
+ continue;
+ }
os_event_reset(srv_buf_dump_event);
}
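
The buf0dump.cc change turns the dump/load request flags into volatile bools and, more importantly, re-checks both flags before resetting the wake-up event, so a request raised while the previous one was being handled is not lost. A small sketch of the same "re-check before sleeping" loop using standard C++ primitives instead of os_event_t; the flag and function names are invented.

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

std::atomic<bool> dump_requested{false};
std::atomic<bool> load_requested{false};
std::atomic<bool> shutting_down{false};
std::mutex wake_mutex;
std::condition_variable wake_cv;

// Analogous to buf_dump_start(): set the flag, then signal.
void request_dump() {
    std::lock_guard<std::mutex> lk(wake_mutex);
    dump_requested = true;
    wake_cv.notify_one();
}

void worker_loop() {
    while (!shutting_down) {
        if (dump_requested.exchange(false)) { /* ... write the dump ... */ }
        if (load_requested.exchange(false)) { /* ... load the dump ... */ }

        std::unique_lock<std::mutex> lk(wake_mutex);
        // Mirror of the added "continue": a request set while we were
        // working must not be swallowed by the wait below.
        if (dump_requested || load_requested) continue;
        wake_cv.wait(lk, [] {
            return dump_requested || load_requested || shutting_down;
        });
    }
}

int main() {
    std::thread worker(worker_loop);
    request_dump();
    {
        std::lock_guard<std::mutex> lk(wake_mutex);
        shutting_down = true;
        wake_cv.notify_one();
    }
    worker.join();
    return 0;
}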
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index a139166d5d3..d923514123c 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -732,8 +733,7 @@ buf_flush_write_complete(
flush_type = buf_page_get_flush_type(bpage);
buf_pool->n_flush[flush_type]--;
- /* fprintf(stderr, "n pending flush %lu\n",
- buf_pool->n_flush[flush_type]); */
+ ut_ad(buf_pool_mutex_own(buf_pool));
if (buf_pool->n_flush[flush_type] == 0
&& buf_pool->init_flush[flush_type] == FALSE) {
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 9373f06e7de..03de0b4db7c 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -6178,7 +6178,6 @@ dict_set_corrupted(
row_mysql_lock_data_dictionary(trx);
}
- ut_ad(index);
ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(!dict_table_is_comp(dict_sys->sys_tables));
ut_ad(!dict_table_is_comp(dict_sys->sys_indexes));
diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc
index 3215fe9a456..05ee6f5886f 100644
--- a/storage/innobase/dict/dict0stats_bg.cc
+++ b/storage/innobase/dict/dict0stats_bg.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,8 +40,9 @@ Created Apr 25, 2012 Vasil Dimov
#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE)
-/** Event to wake up the stats thread */
-UNIV_INTERN os_event_t dict_stats_event = NULL;
+/** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add()
+or shutdown. Not protected by any mutex. */
+UNIV_INTERN os_event_t dict_stats_event;
/** This mutex protects the "recalc_pool" variable. */
static ib_mutex_t recalc_pool_mutex;
diff --git a/storage/innobase/dyn/dyn0dyn.cc b/storage/innobase/dyn/dyn0dyn.cc
index 3ef5297a7c9..dd1f6863c14 100644
--- a/storage/innobase/dyn/dyn0dyn.cc
+++ b/storage/innobase/dyn/dyn0dyn.cc
@@ -40,7 +40,6 @@ dyn_array_add_block(
mem_heap_t* heap;
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
if (arr->heap == NULL) {
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 9d471f9dbd3..72ee8d74a82 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -4953,15 +4954,12 @@ fil_extend_space_to_desired_size(
byte* buf;
ulint buf_size;
ulint start_page_no;
- ulint file_start_page_no;
ulint page_size;
- ulint pages_added;
ibool success;
ut_ad(!srv_read_only_mode);
retry:
- pages_added = 0;
success = TRUE;
fil_mutex_enter_and_prepare_for_io(space_id);
@@ -5015,29 +5013,36 @@ retry:
mutex_exit(&fil_system->mutex);
start_page_no = space->size;
- file_start_page_no = space->size - node->size;
-
+ const ulint file_start_page_no = space->size - node->size;
#ifdef HAVE_POSIX_FALLOCATE
if (srv_use_posix_fallocate) {
- os_offset_t start_offset = start_page_no * page_size;
- os_offset_t n_pages = (size_after_extend - start_page_no);
- os_offset_t len = n_pages * page_size;
-
- if (posix_fallocate(node->handle, start_offset, len) == -1) {
- ib_logf(IB_LOG_LEVEL_ERROR, "preallocating file "
- "space for file \'%s\' failed. Current size "
- INT64PF ", desired size " INT64PF "\n",
- node->name, start_offset, len+start_offset);
- os_file_handle_error_no_exit(node->name, "posix_fallocate", FALSE);
- success = FALSE;
- } else {
- success = TRUE;
+ const os_offset_t start_offset
+ = os_offset_t(start_page_no - file_start_page_no)
+ * page_size;
+ const ulint n_pages
+ = size_after_extend - start_page_no;
+ const os_offset_t len = os_offset_t(n_pages) * page_size;
+
+ int err;
+ do {
+ err = posix_fallocate(node->handle, start_offset, len);
+ } while (err == EINTR
+ && srv_shutdown_state == SRV_SHUTDOWN_NONE);
+
+ success = !err;
+ if (!success) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "extending file %s"
+ " from " INT64PF " to " INT64PF " bytes"
+ " failed with error %d",
+ node->name, start_offset, len + start_offset,
+ err);
}
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
- success = FALSE; errno = 28; os_has_said_disk_full = TRUE;);
+ success = FALSE; os_has_said_disk_full = TRUE;);
mutex_enter(&fil_system->mutex);
+
if (success) {
node->size += n_pages;
space->size += n_pages;
@@ -5054,14 +5059,24 @@ retry:
}
#endif
+#ifdef _WIN32
+ /* Write 1 page of zeroes at the desired end. */
+ start_page_no = size_after_extend - 1;
+ buf_size = page_size;
+#else
/* Extend at most 64 pages at a time */
buf_size = ut_min(64, size_after_extend - start_page_no) * page_size;
- buf2 = static_cast<byte*>(mem_alloc(buf_size + page_size));
+#endif
+ buf2 = static_cast<byte*>(calloc(1, buf_size + page_size));
+ if (!buf2) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "Cannot allocate " ULINTPF
+ " bytes to extend file",
+ buf_size + page_size);
+ success = FALSE;
+ }
buf = static_cast<byte*>(ut_align(buf2, page_size));
- memset(buf, 0, buf_size);
-
- while (start_page_no < size_after_extend) {
+ while (success && start_page_no < size_after_extend) {
ulint n_pages
= ut_min(buf_size / page_size,
size_after_extend - start_page_no);
@@ -5070,56 +5085,47 @@ retry:
= ((os_offset_t) (start_page_no - file_start_page_no))
* page_size;
- const char* name = node->name == NULL ? space->name : node->name;
-
#ifdef UNIV_HOTBACKUP
- success = os_file_write(name, node->handle, buf,
+ success = os_file_write(node->name, node->handle, buf,
offset, page_size * n_pages);
#else
success = os_aio(OS_FILE_WRITE, OS_AIO_SYNC,
- name, node->handle, buf,
+ node->name, node->handle, buf,
offset, page_size * n_pages,
NULL, NULL);
#endif /* UNIV_HOTBACKUP */
-
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
- success = FALSE; errno = 28; os_has_said_disk_full = TRUE;);
-
- if (success) {
- os_has_said_disk_full = FALSE;
- } else {
- /* Let us measure the size of the file to determine
- how much we were able to extend it */
- os_offset_t size;
+ success = FALSE; os_has_said_disk_full = TRUE;);
- size = os_file_get_size(node->handle);
- ut_a(size != (os_offset_t) -1);
+ /* Let us measure the size of the file to determine
+ how much we were able to extend it */
+ os_offset_t size = os_file_get_size(node->handle);
+ ut_a(size != (os_offset_t) -1);
- n_pages = ((ulint) (size / page_size))
- - node->size - pages_added;
-
- pages_added += n_pages;
- break;
- }
-
- start_page_no += n_pages;
- pages_added += n_pages;
+ start_page_no = (ulint) (size / page_size)
+ + file_start_page_no;
}
- mem_free(buf2);
+ free(buf2);
mutex_enter(&fil_system->mutex);
ut_a(node->being_extended);
+ ut_a(start_page_no - file_start_page_no >= node->size);
- space->size += pages_added;
- node->size += pages_added;
+ if (buf) {
+ ulint file_size = start_page_no - file_start_page_no;
+ space->size += file_size - node->size;
+ node->size = file_size;
+ }
fil_node_complete_io(node, fil_system, OS_FILE_WRITE);
/* At this point file has been extended */
+#ifdef HAVE_POSIX_FALLOCATE
file_extended:
+#endif /* HAVE_POSIX_FALLOCATE */
node->being_extended = FALSE;
*actual_size = space->size;
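
The fil_extend_space_to_desired_size() rewrite above retries posix_fallocate() while it keeps returning EINTR (and the server is not shutting down), logs the errno-style return value on failure, and derives the new size from os_file_get_size() instead of tracking pages_added. A minimal sketch of the retry wrapper alone; the helper name and shutdown flag are illustrative, not InnoDB symbols.

#include <cerrno>
#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

static volatile bool server_is_shutting_down = false;   // stand-in flag

// Returns 0 on success, otherwise the error code from posix_fallocate().
static int fallocate_retry(int fd, off_t start, off_t len) {
    int err;
    do {
        err = posix_fallocate(fd, start, len);  // returns an errno value, not -1
    } while (err == EINTR && !server_is_shutting_down);

    if (err) {
        std::fprintf(stderr,
                     "extending file from %lld to %lld bytes failed: error %d\n",
                     static_cast<long long>(start),
                     static_cast<long long>(start + len), err);
    }
    return err;
}

int main() {
    int fd = ::open("/tmp/fallocate_demo", O_RDWR | O_CREAT, 0600);
    if (fd < 0) return 1;
    int err = fallocate_retry(fd, 0, 1 << 20);   // reserve 1 MiB
    ::close(fd);
    return err ? 1 : 0;
}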
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index afdd6667711..1732a02aeac 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -1064,8 +1064,6 @@ fsp_fill_free_list(
ulint i;
mtr_t ibuf_mtr;
- ut_ad(header != NULL);
- ut_ad(mtr != NULL);
ut_ad(page_offset(header) == FSP_HEADER_OFFSET);
/* Check if we can fill free list from above the free list limit */
@@ -1348,9 +1346,6 @@ fsp_alloc_free_page(
ulint page_no;
ulint space_size;
- ut_ad(mtr);
- ut_ad(init_mtr);
-
header = fsp_get_space_header(space, zip_size, mtr);
/* Get the hinted descriptor */
@@ -2363,7 +2358,6 @@ fseg_alloc_free_page_low(
ibool success;
ulint n;
- ut_ad(mtr);
ut_ad((direction >= FSP_UP) && (direction <= FSP_NO_DIR));
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
@@ -2801,6 +2795,7 @@ try_again:
}
} else {
ut_a(alloc_type == FSP_CLEANING);
+ reserve = 0;
}
success = fil_space_reserve_free_extents(space, n_free, n_ext);
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 6059c28eabc..18d09c8138f 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2016, MariaDB Corporation. All Rights reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1934,6 +1935,8 @@ func_exit:
/*************************************************************//**
Wrapper function of fts_create_index_tables_low(), create auxiliary
tables for an FTS index
+
+@see row_merge_create_fts_sort_index()
@return: DB_SUCCESS or error code */
static
dict_table_t*
@@ -1965,13 +1968,12 @@ fts_create_one_index_table(
(int)(field->col->prtype & DATA_MYSQL_TYPE_MASK),
(uint) dtype_get_charset_coll(field->col->prtype));
- if (strcmp(charset->name, "latin1_swedish_ci") == 0) {
- dict_mem_table_add_col(new_table, heap, "word", DATA_VARCHAR,
- field->col->prtype, FTS_MAX_WORD_LEN);
- } else {
- dict_mem_table_add_col(new_table, heap, "word", DATA_VARMYSQL,
- field->col->prtype, FTS_MAX_WORD_LEN);
- }
+ dict_mem_table_add_col(new_table, heap, "word",
+ charset == &my_charset_latin1
+ ? DATA_VARCHAR : DATA_VARMYSQL,
+ field->col->prtype,
+ FTS_MAX_WORD_LEN_IN_CHAR
+ * DATA_MBMAXLEN(field->col->mbminmaxlen));
dict_mem_table_add_col(new_table, heap, "first_doc_id", DATA_INT,
DATA_NOT_NULL | DATA_UNSIGNED,
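
In the hunk above, fts_create_one_index_table() now picks DATA_VARCHAR versus DATA_VARMYSQL by comparing the charset pointer against &my_charset_latin1 rather than strcmp() on the collation name, and sizes the "word" column as characters times mbmaxlen. A rough sketch of the pointer-comparison idea outside InnoDB; the types, constants and the latin1 object below are stand-ins.

#include <iostream>

struct charset_info { const char* name; unsigned mbmaxlen; };

// Stand-in for the shared, server-wide latin1 charset object.
static const charset_info my_latin1 = { "latin1_swedish_ci", 1 };

enum col_type { COL_VARCHAR, COL_VARMYSQL };

// Pointer identity is enough when every table refers to the shared
// charset objects; no string comparison of collation names is needed.
static col_type word_column_type(const charset_info* cs) {
    return cs == &my_latin1 ? COL_VARCHAR : COL_VARMYSQL;
}

static unsigned word_column_bytes(const charset_info* cs, unsigned max_chars) {
    return max_chars * cs->mbmaxlen;   // byte length, not character length
}

int main() {
    const charset_info utf8 = { "utf8_general_ci", 3 };
    std::cout << word_column_type(&my_latin1) << " "
              << word_column_bytes(&utf8, 84) << "\n";   // prints 0 252
    return 0;
}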
diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc
index 455e2669c0d..cb30122adcb 100644
--- a/storage/innobase/fts/fts0opt.cc
+++ b/storage/innobase/fts/fts0opt.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2016, MariaDB Corporation. All Rights reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -281,7 +282,7 @@ fts_zip_initialize(
zip->last_big_block = 0;
zip->word.f_len = 0;
- memset(zip->word.f_str, 0, FTS_MAX_WORD_LEN);
+ *zip->word.f_str = 0;
ib_vector_reset(zip->blocks);
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 6fe56892ae6..084a3090f15 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -4,7 +4,7 @@ Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2016, MariaDB Corporation.
+Copyright (c) 2013, 2017, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -3604,7 +3604,6 @@ innobase_change_buffering_inited_ok:
and consequently we do not need to know the ordering internally in
InnoDB. */
- ut_a(0 == strcmp(my_charset_latin1.name, "latin1_swedish_ci"));
srv_latin1_ordering = my_charset_latin1.sort_order;
innobase_commit_concurrency_init_default();
@@ -6390,18 +6389,16 @@ get_innobase_type_from_mysql_type(
case MYSQL_TYPE_VARCHAR: /* new >= 5.0.3 true VARCHAR */
if (field->binary()) {
return(DATA_BINARY);
- } else if (strcmp(field->charset()->name,
- "latin1_swedish_ci") == 0) {
+ } else if (field->charset() == &my_charset_latin1) {
return(DATA_VARCHAR);
} else {
return(DATA_VARMYSQL);
}
case MYSQL_TYPE_BIT:
- case MYSQL_TYPE_STRING: if (field->binary()) {
-
+ case MYSQL_TYPE_STRING:
+ if (field->binary()) {
return(DATA_FIXBINARY);
- } else if (strcmp(field->charset()->name,
- "latin1_swedish_ci") == 0) {
+ } else if (field->charset() == &my_charset_latin1) {
return(DATA_CHAR);
} else {
return(DATA_MYSQL);
@@ -18497,13 +18494,6 @@ static MYSQL_SYSVAR_ULONG(force_recovery, srv_force_recovery,
"Helps to save your data in case the disk image of the database becomes corrupt.",
NULL, NULL, 0, 0, 6, 0);
-#ifndef DBUG_OFF
-static MYSQL_SYSVAR_ULONG(force_recovery_crash, srv_force_recovery_crash,
- PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
- "Kills the server during crash recovery.",
- NULL, NULL, 0, 0, 10, 0);
-#endif /* !DBUG_OFF */
-
static MYSQL_SYSVAR_ULONG(page_size, srv_page_size,
PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
"Page size to use for all InnoDB tablespaces.",
@@ -18906,9 +18896,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(flush_log_at_trx_commit),
MYSQL_SYSVAR(flush_method),
MYSQL_SYSVAR(force_recovery),
-#ifndef DBUG_OFF
- MYSQL_SYSVAR(force_recovery_crash),
-#endif /* !DBUG_OFF */
MYSQL_SYSVAR(ft_cache_size),
MYSQL_SYSVAR(ft_total_cache_size),
MYSQL_SYSVAR(ft_result_cache_limit),
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index b80e5d505ac..78e961d91b7 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1905,7 +1906,9 @@ struct buf_pool_t{
os_event_t no_flush[BUF_FLUSH_N_TYPES];
/*!< this is in the set state
when there is no flush batch
- of the given type running */
+ of the given type running;
+ os_event_set() and os_event_reset()
+ are protected by buf_pool_t::mutex */
ib_rbt_t* flush_rbt; /*!< a red-black tree is used
exclusively during recovery to
speed up insertions in the
diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h
index a62a6400d97..5582778825c 100644
--- a/storage/innobase/include/buf0dblwr.h
+++ b/storage/innobase/include/buf0dblwr.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -134,11 +135,13 @@ struct buf_dblwr_t{
ulint b_reserved;/*!< number of slots currently reserved
for batch flush. */
os_event_t b_event;/*!< event where threads wait for a
- batch flush to end. */
+ batch flush to end;
+ os_event_set() and os_event_reset()
+ are protected by buf_dblwr_t::mutex */
ulint s_reserved;/*!< number of slots currently
reserved for single page flushes. */
os_event_t s_event;/*!< event where threads wait for a
- single page flush slot. */
+ single page flush slot. Protected by mutex. */
bool* in_use; /*!< flag used to indicate if a slot is
in use. Only used for single page
flushes. */
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index 6fc6098f3d9..1ed231aa248 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -266,7 +266,6 @@ dict_index_is_clust(
/*================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_CLUSTERED);
@@ -280,7 +279,6 @@ dict_index_is_unique(
/*=================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_UNIQUE);
@@ -295,7 +293,6 @@ dict_index_is_ibuf(
/*===============*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_IBUF);
@@ -327,7 +324,6 @@ dict_index_is_sec_or_ibuf(
{
ulint type;
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
type = index->type;
@@ -345,7 +341,6 @@ dict_table_get_n_user_cols(
/*=======================*/
const dict_table_t* table) /*!< in: table */
{
- ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
return(table->n_cols - DATA_N_SYS_COLS);
@@ -377,7 +372,6 @@ dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
{
- ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
return(table->n_cols);
@@ -1373,7 +1367,6 @@ dict_index_is_corrupted(
/*====================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return((index->type & DICT_CORRUPT)
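
The dict0dict.ic hunks drop ut_ad(index) style assertions that immediately precede a dereference of the same pointer: ut_ad(index->magic_n == ...) already faults or trips in debug builds if the pointer is NULL, so the extra check adds nothing. A sketch of the resulting shape with a generic assert; names are invented.

#include <cassert>

struct Index { unsigned magic_n; unsigned type; };
static const unsigned INDEX_MAGIC = 0x1c0ffeeU;

// The magic-number assertion dereferences 'index', so a separate
// assert(index != nullptr) in front of it would be redundant.
static bool index_is_clustered(const Index* index) {
    assert(index->magic_n == INDEX_MAGIC);
    return (index->type & 1U) != 0;
}

int main() {
    Index i = { INDEX_MAGIC, 1U };
    return index_is_clustered(&i) ? 0 : 1;
}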
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 1c674134514..9bbb836663f 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -133,7 +133,7 @@ allows InnoDB to update_create_info() accordingly. */
+ DICT_TF_WIDTH_DATA_DIR)
/** A mask of all the known/used bits in table flags */
-#define DICT_TF_BIT_MASK (~(~0 << DICT_TF_BITS))
+#define DICT_TF_BIT_MASK (~(~0U << DICT_TF_BITS))
/** Zero relative shift position of the COMPACT field */
#define DICT_TF_POS_COMPACT 0
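
DICT_TF_BIT_MASK here, and FSP_FLAGS_MASK later in the diff, switch from ~(~0 << N) to ~(~0U << N): left-shifting the signed all-ones value ~0 is not portable, while the unsigned form is a well-defined way to build an N-bit mask. Tiny sketch of the same construct:

#include <cstdint>
#include <iostream>

// Well-defined N-bit mask for 0 <= n_bits < 32: shift an unsigned
// all-ones value, then invert. Shifting the signed ~0 is not portable.
constexpr std::uint32_t low_bit_mask(unsigned n_bits) {
    return ~(~0U << n_bits);
}

int main() {
    std::cout << std::hex << low_bit_mask(6) << "\n";   // prints 3f
    return 0;
}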
diff --git a/storage/innobase/include/dict0stats_bg.h b/storage/innobase/include/dict0stats_bg.h
index 82cd2b468b9..1973380d197 100644
--- a/storage/innobase/include/dict0stats_bg.h
+++ b/storage/innobase/include/dict0stats_bg.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -32,7 +33,8 @@ Created Apr 26, 2012 Vasil Dimov
#include "os0sync.h" /* os_event_t */
#include "os0thread.h" /* DECLARE_THREAD */
-/** Event to wake up the stats thread */
+/** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add()
+or shutdown. Not protected by any mutex. */
extern os_event_t dict_stats_event;
/*****************************************************************//**
diff --git a/storage/innobase/include/dyn0dyn.ic b/storage/innobase/include/dyn0dyn.ic
index f18f2e6dff9..13003862638 100644
--- a/storage/innobase/include/dyn0dyn.ic
+++ b/storage/innobase/include/dyn0dyn.ic
@@ -47,8 +47,6 @@ dyn_block_get_used(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
{
- ut_ad(block);
-
return((block->used) & ~DYN_BLOCK_FULL_FLAG);
}
@@ -76,7 +74,6 @@ dyn_array_create(
dyn_array_t* arr) /*!< in/out: memory buffer of
size sizeof(dyn_array_t) */
{
- ut_ad(arr);
#if DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG
# error "DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG"
#endif
@@ -119,7 +116,6 @@ dyn_array_push(
dyn_block_t* block;
ulint used;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
ut_ad(size <= DYN_ARRAY_DATA_SIZE);
ut_ad(size);
@@ -159,7 +155,6 @@ dyn_array_open(
{
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
ut_ad(size <= DYN_ARRAY_DATA_SIZE);
ut_ad(size);
@@ -195,7 +190,6 @@ dyn_array_close(
{
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
block = dyn_array_get_last_block(arr);
@@ -222,7 +216,6 @@ dyn_array_get_element(
{
const dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
/* Get the first array block */
@@ -260,7 +253,6 @@ dyn_array_get_data_size(
const dyn_block_t* block;
ulint sum = 0;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
if (arr->heap == NULL) {
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index 35a4b2496d4..f29d1bf3408 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -206,7 +207,9 @@ struct fil_node_t {
ibool open; /*!< TRUE if file open */
os_file_t handle; /*!< OS handle to the file, if file open */
os_event_t sync_event;/*!< Condition event to group and
- serialize calls to fsync */
+ serialize calls to fsync;
+ os_event_set() and os_event_reset()
+ are protected by fil_system_t::mutex */
ibool is_raw_disk;/*!< TRUE if the 'file' is actually a raw
device or a raw disk partition */
ulint size; /*!< size of the file in database pages, 0 if
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index 099cb8edc14..0097537e4d6 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
@@ -61,7 +61,7 @@ is found in a remote location, not the default data directory. */
+ FSP_FLAGS_WIDTH_DATA_DIR)
/** A mask of all the known/used bits in tablespace flags */
-#define FSP_FLAGS_MASK (~(~0 << FSP_FLAGS_WIDTH))
+#define FSP_FLAGS_MASK (~(~0U << FSP_FLAGS_WIDTH))
/** Zero relative shift position of the POST_ANTELOPE field */
#define FSP_FLAGS_POS_POST_ANTELOPE 0
diff --git a/storage/innobase/include/fsp0types.h b/storage/innobase/include/fsp0types.h
index 94fd908ab0c..9734f4c8abc 100644
--- a/storage/innobase/include/fsp0types.h
+++ b/storage/innobase/include/fsp0types.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h
index 3e2f359bbeb..7aa7055640c 100644
--- a/storage/innobase/include/fts0fts.h
+++ b/storage/innobase/include/fts0fts.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2016, MariaDB Corporation. All Rights reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -366,8 +367,8 @@ extern ulong fts_min_token_size;
need a sync to free some memory */
extern bool fts_need_sync;
-/** Maximum possible Fulltext word length */
-#define FTS_MAX_WORD_LEN HA_FT_MAXBYTELEN
+/** Maximum possible Fulltext word length in bytes (assuming mbmaxlen=4) */
+#define FTS_MAX_WORD_LEN (HA_FT_MAXCHARLEN * 4)
/** Maximum possible Fulltext word length (in characters) */
#define FTS_MAX_WORD_LEN_IN_CHAR HA_FT_MAXCHARLEN
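
FTS_MAX_WORD_LEN is redefined in bytes as the character limit times 4, so the byte budget covers the widest encoding (4 bytes per character) instead of inheriting HA_FT_MAXBYTELEN. A short sketch of the chars-versus-bytes arithmetic; the constants below are illustrative, not the MySQL header values.

#include <iostream>

// Assumed limits, for illustration only.
const unsigned MAX_WORD_CHARS     = 84;   // stand-in for HA_FT_MAXCHARLEN
const unsigned MAX_BYTES_PER_CHAR = 4;    // worst case, e.g. a 4-byte charset

// Buffers sized in bytes must allow for the widest encoding of the
// maximum number of characters.
const unsigned MAX_WORD_BYTES = MAX_WORD_CHARS * MAX_BYTES_PER_CHAR;

int main() {
    std::cout << MAX_WORD_CHARS << " chars -> up to "
              << MAX_WORD_BYTES << " bytes\n";   // 84 chars -> up to 336 bytes
    return 0;
}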
diff --git a/storage/innobase/include/fts0types.h b/storage/innobase/include/fts0types.h
index e495fe72a60..0dad75d8f1b 100644
--- a/storage/innobase/include/fts0types.h
+++ b/storage/innobase/include/fts0types.h
@@ -126,7 +126,9 @@ struct fts_sync_t {
bool in_progress; /*!< flag whether sync is in progress.*/
bool unlock_cache; /*!< flag whether unlock cache when
write fts node */
- os_event_t event; /*!< sync finish event */
+ os_event_t event; /*!< sync finish event;
+ only os_event_set() and os_event_wait()
+ are used */
};
/** The cache for the FTS system. It is a memory-based inverted index
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index c68651aa03b..2547097a14b 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -928,7 +928,12 @@ struct lock_sys_t{
srv_slot_t* waiting_threads; /*!< Array of user threads
suspended while waiting for
locks within InnoDB, protected
- by the lock_sys->wait_mutex */
+ by the lock_sys->wait_mutex;
+ os_event_set() and
+ os_event_reset() on
+ waiting_threads[]->event
+ are protected by
+ trx_t::mutex */
srv_slot_t* last_slot; /*!< highest slot ever used
in the waiting_threads array,
protected by
@@ -941,10 +946,11 @@ struct lock_sys_t{
ulint n_lock_max_wait_time; /*!< Max wait time */
- os_event_t timeout_event; /*!< Set to the event that is
- created in the lock wait monitor
- thread. A value of 0 means the
- thread is not active */
+ os_event_t timeout_event; /*!< An event waited for by
+ lock_wait_timeout_thread.
+ Not protected by a mutex,
+ but the waits are timed.
+ Signaled on shutdown only. */
bool timeout_thread_active; /*!< True if the timeout thread
is running */
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index ad9710b1870..44074a61952 100644
--- a/storage/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2009, Google Inc.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -875,10 +876,8 @@ struct log_t{
be 'flush_or_write'! */
os_event_t no_flush_event; /*!< this event is in the reset state
when a flush or a write is running;
- a thread should wait for this without
- owning the log mutex, but NOTE that
- to set or reset this event, the
- thread MUST own the log mutex! */
+ os_event_set() and os_event_reset()
+ are protected by log_sys_t::mutex */
ibool one_flushed; /*!< during a flush, this is
first FALSE and becomes TRUE
when one log group has been
@@ -887,11 +886,9 @@ struct log_t{
flush or write has not yet completed
for any log group; e.g., this means
that a transaction has been committed
- when this is set; a thread should wait
- for this without owning the log mutex,
- but NOTE that to set or reset this
- event, the thread MUST own the log
- mutex! */
+ when this is set;
+ os_event_set() and os_event_reset()
+ are protected by log_sys_t::mutex */
ulint n_log_ios; /*!< number of log i/os initiated thus
far */
ulint n_log_ios_old; /*!< number of log i/o's at the
@@ -976,9 +973,9 @@ struct log_t{
ulint archive_buf_size;/*!< size of archive_buf */
byte* archive_buf; /*!< log segment is written to the
archive from this buffer */
- os_event_t archiving_on; /*!< if archiving has been stopped,
- a thread can wait for this event to
- become signaled */
+ os_event_t archiving_on; /*!< if archiving has been stopped;
+ os_event_set() and os_event_reset()
+ are protected by log_sys_t::mutex */
/* @} */
#endif /* UNIV_LOG_ARCHIVE */
};
diff --git a/storage/innobase/include/mach0data.ic b/storage/innobase/include/mach0data.ic
index c46fcec107e..6b303979b18 100644
--- a/storage/innobase/include/mach0data.ic
+++ b/storage/innobase/include/mach0data.ic
@@ -52,7 +52,6 @@ mach_read_from_1(
/*=============*/
const byte* b) /*!< in: pointer to byte */
{
- ut_ad(b);
return((ulint)(b[0]));
}
@@ -149,7 +148,6 @@ mach_read_from_3(
/*=============*/
const byte* b) /*!< in: pointer to 3 bytes */
{
- ut_ad(b);
return( ((ulint)(b[0]) << 16)
| ((ulint)(b[1]) << 8)
| (ulint)(b[2])
@@ -186,7 +184,6 @@ mach_read_from_4(
/*=============*/
const byte* b) /*!< in: pointer to four bytes */
{
- ut_ad(b);
return( ((ulint)(b[0]) << 24)
| ((ulint)(b[1]) << 16)
| ((ulint)(b[2]) << 8)
@@ -265,8 +262,6 @@ mach_read_compressed(
{
ulint flag;
- ut_ad(b);
-
flag = mach_read_from_1(b);
if (flag < 0x80UL) {
@@ -347,8 +342,6 @@ mach_read_from_7(
/*=============*/
const byte* b) /*!< in: pointer to 7 bytes */
{
- ut_ad(b);
-
return(ut_ull_create(mach_read_from_3(b), mach_read_from_4(b + 3)));
}
@@ -378,8 +371,6 @@ mach_read_from_6(
/*=============*/
const byte* b) /*!< in: pointer to 6 bytes */
{
- ut_ad(b);
-
return(ut_ull_create(mach_read_from_2(b), mach_read_from_4(b + 2)));
}
@@ -427,8 +418,6 @@ mach_ull_read_compressed(
ib_uint64_t n;
ulint size;
- ut_ad(b);
-
n = (ib_uint64_t) mach_read_compressed(b);
size = mach_get_compressed_size((ulint) n);
@@ -494,8 +483,6 @@ mach_ull_read_much_compressed(
ib_uint64_t n;
ulint size;
- ut_ad(b);
-
if (*b != (byte)0xFF) {
n = 0;
size = 0;
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index e7fa1926bc7..6cb4f54d629 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted
by Percona Inc.. Those modifications are
@@ -1143,6 +1144,7 @@ UNIV_INTERN
void
os_aio_simulated_wake_handler_threads(void);
/*=======================================*/
+#ifdef _WIN32
/**********************************************************************//**
This function can be called if one wants to post a batch of reads and
prefers an i/o-handler thread to handle them all at once later. You must
@@ -1150,8 +1152,10 @@ call os_aio_simulated_wake_handler_threads later to ensure the threads
are not left sleeping! */
UNIV_INTERN
void
-os_aio_simulated_put_read_threads_to_sleep(void);
-/*============================================*/
+os_aio_simulated_put_read_threads_to_sleep();
+#else /* _WIN32 */
+# define os_aio_simulated_put_read_threads_to_sleep()
+#endif /* _WIN32 */
#ifdef WIN_ASYNC_IO
/**********************************************************************//**
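
In os0file.h above, os_aio_simulated_put_read_threads_to_sleep() is declared only under _WIN32 and replaced by an empty macro elsewhere, so callers need no #ifdef of their own. A generic sketch of that pattern; the function here is hypothetical.

#include <iostream>

#ifdef _WIN32
// Real implementation only exists in Windows builds.
void pause_background_readers();
#else
// Elsewhere the call compiles away entirely.
# define pause_background_readers() ((void)0)
#endif

void issue_read_batch() {
    pause_background_readers();     // no #ifdef needed at the call site
    std::cout << "reads posted\n";
}

#ifdef _WIN32
void pause_background_readers() { /* put simulated AIO threads to sleep */ }
#endif

int main() { issue_read_batch(); return 0; }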
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index 99e17001c0a..a4d3bd40426 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -160,7 +160,6 @@ page_header_get_offs(
{
ulint offs;
- ut_ad(page);
ut_ad((field == PAGE_FREE)
|| (field == PAGE_LAST_INSERT)
|| (field == PAGE_HEAP_TOP));
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 4b9b8281d04..7795cf9b309 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, 2009, Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation Ab. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -145,13 +145,16 @@ extern const char* srv_main_thread_op_info;
/** Prefix used by MySQL to indicate pre-5.1 table name encoding */
extern const char srv_mysql50_table_name_prefix[10];
-/* The monitor thread waits on this event. */
+/** Event to signal srv_monitor_thread. Not protected by a mutex.
+Set after setting srv_print_innodb_monitor. */
extern os_event_t srv_monitor_event;
-/* The error monitor thread waits on this event. */
+/** Event to signal the shutdown of srv_error_monitor_thread.
+Not protected by a mutex. */
extern os_event_t srv_error_event;
-/** The buffer pool dump/load thread waits on this event. */
+/** Event for waking up buf_dump_thread. Not protected by a mutex.
+Set on shutdown or by buf_dump_start() or buf_load_start(). */
extern os_event_t srv_buf_dump_event;
/** The buffer pool dump/load file name */
@@ -349,9 +352,6 @@ extern double srv_adaptive_flushing_lwm;
extern ulong srv_flushing_avg_loops;
extern ulong srv_force_recovery;
-#ifndef DBUG_OFF
-extern ulong srv_force_recovery_crash;
-#endif /* !DBUG_OFF */
extern ulint srv_fast_shutdown; /*!< If this is 1, do not do a
purge and index buffer merge.
@@ -785,17 +785,12 @@ ulint
srv_get_task_queue_length(void);
/*===========================*/
-/*********************************************************************//**
-Releases threads of the type given from suspension in the thread table.
-NOTE! The server mutex has to be reserved by the caller!
-@return number of threads released: this may be less than n if not
-enough threads were suspended at the moment */
-UNIV_INTERN
-ulint
-srv_release_threads(
-/*================*/
- enum srv_thread_type type, /*!< in: thread type */
- ulint n); /*!< in: number of threads to release */
+/** Ensure that a given number of threads of the type given are running
+(or are already terminated).
+@param[in] type thread type
+@param[in] n number of threads that have to run */
+void
+srv_release_threads(enum srv_thread_type type, ulint n);
/**********************************************************************//**
Check whether any background thread are active. If so print which thread
@@ -806,12 +801,10 @@ const char*
srv_any_background_threads_are_active(void);
/*=======================================*/
-/**********************************************************************//**
-Wakeup the purge threads. */
+/** Wake up the purge threads. */
UNIV_INTERN
void
-srv_purge_wakeup(void);
-/*==================*/
+srv_purge_wakeup();
/** Status variables to be passed to MySQL */
struct export_var_t{
diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h
index 1e13c883800..f72652963c9 100644
--- a/storage/innobase/include/trx0purge.h
+++ b/storage/innobase/include/trx0purge.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -145,7 +146,10 @@ struct trx_purge_t{
log operation can prevent this by
obtaining an s-latch here. It also
protects state and running */
- os_event_t event; /*!< State signal event */
+ os_event_t event; /*!< State signal event;
+ os_event_set() and os_event_reset()
+ are protected by trx_purge_t::latch
+ X-lock */
ulint n_stop; /*!< Counter to track number stops */
volatile bool running; /*!< true, if purge is active,
we check this without the latch too */
diff --git a/storage/innobase/include/ut0wqueue.h b/storage/innobase/include/ut0wqueue.h
index 33385ddf2d4..09b2eb717a7 100644
--- a/storage/innobase/include/ut0wqueue.h
+++ b/storage/innobase/include/ut0wqueue.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2006, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -99,7 +100,9 @@ ib_wqueue_timedwait(
struct ib_wqueue_t {
ib_mutex_t mutex; /*!< mutex protecting everything */
ib_list_t* items; /*!< work item list */
- os_event_t event; /*!< event we use to signal additions to list */
+ os_event_t event; /*!< event we use to signal additions to list;
+ os_event_set() and os_event_reset() are
+ protected by ib_wqueue_t::mutex */
};
#endif
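
The ib_wqueue_t comment now records that os_event_set()/os_event_reset() on the queue's event happen under the queue mutex, which is what makes the "item added" wake-up race-free. A condensed sketch of such a queue using standard C++ primitives; the names are illustrative.

#include <condition_variable>
#include <list>
#include <mutex>

template <typename T>
class WorkQueue {
public:
    void push(T item) {
        std::lock_guard<std::mutex> lk(mutex_);   // signal while holding the lock
        items_.push_back(std::move(item));
        cv_.notify_one();
    }
    T wait_pop() {
        std::unique_lock<std::mutex> lk(mutex_);
        cv_.wait(lk, [this] { return !items_.empty(); });
        T item = std::move(items_.front());
        items_.pop_front();
        return item;
    }
private:
    std::mutex mutex_;                 // protects items_ and the signalling
    std::condition_variable cv_;
    std::list<T> items_;
};

int main() {
    WorkQueue<int> q;
    q.push(42);
    return q.wait_pop() == 42 ? 0 : 1;
}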
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 8f2af1b0454..57516d7c8f4 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Google Inc.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -1459,17 +1460,7 @@ log_write_up_to(
}
loop:
-#ifdef UNIV_DEBUG
- loop_count++;
-
- ut_ad(loop_count < 5);
-
-# if 0
- if (loop_count > 2) {
- fprintf(stderr, "Log loop count %lu\n", loop_count);
- }
-# endif
-#endif
+ ut_ad(++loop_count < 100);
mutex_enter(&(log_sys->mutex));
ut_ad(!recv_no_log_write);
@@ -3204,7 +3195,6 @@ logs_empty_and_mark_files_at_shutdown(void)
lsn_t lsn;
ulint arch_log_no;
ulint count = 0;
- ulint total_trx;
ulint pending_io;
enum srv_thread_type active_thd;
const char* thread_name;
@@ -3251,10 +3241,9 @@ loop:
shutdown, because the InnoDB layer may have committed or
prepared transactions and we don't want to lose them. */
- total_trx = trx_sys_any_active_transactions();
-
- if (total_trx > 0) {
-
+ if (ulint total_trx = srv_was_started && !srv_read_only_mode
+ && srv_force_recovery < SRV_FORCE_NO_TRX_UNDO
+ ? trx_sys_any_active_transactions() : 0) {
if (srv_print_verbose_log && count > 600) {
ib_logf(IB_LOG_LEVEL_INFO,
"Waiting for %lu active transactions to finish",
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index aed94d00834..6137c84b21d 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -174,7 +174,7 @@ UNIV_INTERN mysql_pfs_key_t recv_writer_mutex_key;
# endif /* UNIV_PFS_MUTEX */
/** Flag indicating if recv_writer thread is active. */
-UNIV_INTERN bool recv_writer_thread_active = false;
+static volatile bool recv_writer_thread_active;
UNIV_INTERN os_thread_t recv_writer_thread_handle = 0;
#endif /* !UNIV_HOTBACKUP */
@@ -344,8 +344,6 @@ DECLARE_THREAD(recv_writer_thread)(
os_thread_pf(os_thread_get_curr_id()));
#endif /* UNIV_DEBUG_THREAD_CREATION */
- recv_writer_thread_active = true;
-
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
os_thread_sleep(100000);
@@ -2806,11 +2804,10 @@ recv_scan_log_recs(
recv_init_crash_recovery();
} else {
-
- ib_logf(IB_LOG_LEVEL_WARN,
- "Recovery skipped, "
- "--innodb-read-only set!");
-
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "innodb_read_only prevents"
+ " crash recovery");
+ recv_needed_recovery = TRUE;
return(TRUE);
}
}
@@ -2989,6 +2986,7 @@ recv_init_crash_recovery(void)
/* Spawn the background thread to flush dirty pages
from the buffer pools. */
+ recv_writer_thread_active = true;
recv_writer_thread_handle = os_thread_create(
recv_writer_thread, 0, 0);
}
@@ -3227,6 +3225,11 @@ recv_recovery_from_checkpoint_start_func(
/* Done with startup scan. Clear the flag. */
recv_log_scan_is_startup_type = FALSE;
+
+ if (srv_read_only_mode && recv_needed_recovery) {
+ return(DB_READ_ONLY);
+ }
+
if (TYPE_CHECKPOINT) {
/* NOTE: we always do a 'recovery' at startup, but only if
there is something wrong we will print a message to the
@@ -3377,15 +3380,6 @@ void
recv_recovery_from_checkpoint_finish(void)
/*======================================*/
{
- /* Apply the hashed log records to the respective file pages */
-
- if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
-
- recv_apply_hashed_log_recs(TRUE);
- }
-
- DBUG_PRINT("ib_log", ("apply completed"));
-
if (recv_needed_recovery) {
trx_sys_print_mysql_master_log_pos();
trx_sys_print_mysql_binlog_offset();
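
In log0recv.cc, recv_writer_thread_active becomes a file-local volatile flag that is set before os_thread_create() rather than inside the thread body, so code that polls the flag cannot miss a thread that has been created but not yet scheduled. A small sketch of the same ordering with std::thread; the names are invented.

#include <atomic>
#include <chrono>
#include <thread>

std::atomic<bool> writer_active{false};
std::atomic<bool> shutting_down{false};

void writer_thread() {
    while (!shutting_down)
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    writer_active = false;             // cleared by the thread itself on exit
}

int main() {
    writer_active = true;              // set BEFORE the thread starts, so a
    std::thread t(writer_thread);      // concurrent shutdown already sees it
    shutting_down = true;
    t.join();
    return writer_active ? 1 : 0;
}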
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index f60ec648ba1..6e87671817e 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -309,7 +309,6 @@ mtr_commit(
/*=======*/
mtr_t* mtr) /*!< in: mini-transaction */
{
- ut_ad(mtr);
ut_ad(mtr->magic_n == MTR_MAGIC_N);
ut_ad(mtr->state == MTR_ACTIVE);
ut_ad(!mtr->inside_ibuf);
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index a9b9cb0fa96..57503fd77f7 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
+Copyright (c) 2012, 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted
by Percona Inc.. Those modifications are
@@ -199,11 +200,15 @@ struct os_aio_array_t{
os_event_t not_full;
/*!< The event which is set to the
signaled state when there is space in
- the aio outside the ibuf segment */
+ the aio outside the ibuf segment;
+ os_event_set() and os_event_reset()
+ are protected by os_aio_array_t::mutex */
os_event_t is_empty;
/*!< The event which is set to the
signaled state when there are no
- pending i/os in this array */
+ pending i/os in this array;
+ os_event_set() and os_event_reset()
+ are protected by os_aio_array_t::mutex */
ulint n_slots;/*!< Total number of slots in the aio
array. This must be divisible by
n_threads. */
@@ -254,8 +259,8 @@ struct os_aio_array_t{
#define OS_AIO_IO_SETUP_RETRY_ATTEMPTS 5
#endif
-/** Array of events used in simulated aio */
-static os_event_t* os_aio_segment_wait_events = NULL;
+/** Array of events used in simulated aio. */
+static os_event_t* os_aio_segment_wait_events;
/** The aio arrays for non-ibuf i/o and ibuf i/o, as well as sync aio. These
are NULL when the module has not yet been initialized. @{ */
@@ -2135,48 +2140,52 @@ os_file_set_size(
os_file_t file, /*!< in: handle to a file */
os_offset_t size) /*!< in: file size */
{
- os_offset_t current_size;
ibool ret;
byte* buf;
byte* buf2;
ulint buf_size;
- current_size = 0;
-
#ifdef HAVE_POSIX_FALLOCATE
if (srv_use_posix_fallocate) {
+ int err;
+ do {
+ err = posix_fallocate(file, 0, size);
+ } while (err == EINTR
+ && srv_shutdown_state == SRV_SHUTDOWN_NONE);
- if (posix_fallocate(file, current_size, size) == -1) {
-
- fprintf(stderr, "InnoDB: Error: preallocating file "
- "space for file \'%s\' failed. Current size "
- "%lu, desired size %lu\n",
- name, (long unsigned) current_size, (long unsigned) size);
- os_file_handle_error_no_exit(name, "posix_fallocate", FALSE);
- return(FALSE);
+ if (err) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "preallocating " INT64PF " bytes for"
+ "file %s failed with error %d",
+ size, name, err);
}
- return(TRUE);
+ return(!err);
}
#endif
-
+#ifdef _WIN32
+ /* Write 1 page of zeroes at the desired end. */
+ buf_size = UNIV_PAGE_SIZE;
+ os_offset_t current_size = size - buf_size;
+#else
/* Write up to 1 megabyte at a time. */
buf_size = ut_min(64, (ulint) (size / UNIV_PAGE_SIZE))
* UNIV_PAGE_SIZE;
- buf2 = static_cast<byte*>(ut_malloc(buf_size + UNIV_PAGE_SIZE));
+ os_offset_t current_size = 0;
+#endif
+ buf2 = static_cast<byte*>(calloc(1, buf_size + UNIV_PAGE_SIZE));
+
+ if (!buf2) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot allocate " ULINTPF " bytes to extend file\n",
+ buf_size + UNIV_PAGE_SIZE);
+ return(FALSE);
+ }
/* Align the buffer for possible raw i/o */
buf = static_cast<byte*>(ut_align(buf2, UNIV_PAGE_SIZE));
- /* Write buffer full of zeros */
- memset(buf, 0, buf_size);
-
- if (size >= (os_offset_t) 100 << 20) {
-
- fprintf(stderr, "InnoDB: Progress in MB:");
- }
-
- while (current_size < size) {
+ do {
ulint n_bytes;
if (size - current_size < (os_offset_t) buf_size) {
@@ -2187,37 +2196,15 @@ os_file_set_size(
ret = os_file_write(name, file, buf, current_size, n_bytes);
if (!ret) {
- ut_free(buf2);
- goto error_handling;
- }
-
- /* Print about progress for each 100 MB written */
- if ((current_size + n_bytes) / (100 << 20)
- != current_size / (100 << 20)) {
-
- fprintf(stderr, " %lu00",
- (ulong) ((current_size + n_bytes)
- / (100 << 20)));
+ break;
}
current_size += n_bytes;
- }
-
- if (size >= (os_offset_t) 100 << 20) {
-
- fprintf(stderr, "\n");
- }
-
- ut_free(buf2);
-
- ret = os_file_flush(file);
+ } while (current_size < size);
- if (ret) {
- return(TRUE);
- }
+ free(buf2);
-error_handling:
- return(FALSE);
+ return(ret && os_file_flush(file));
}
/***********************************************************************//**
@@ -4028,6 +4015,12 @@ os_aio_init(
os_aio_validate();
+ os_last_printout = ut_time();
+
+ if (srv_use_native_aio) {
+ return(TRUE);
+ }
+
os_aio_segment_wait_events = static_cast<os_event_t*>(
ut_malloc(n_segments * sizeof *os_aio_segment_wait_events));
@@ -4035,10 +4028,7 @@ os_aio_init(
os_aio_segment_wait_events[i] = os_event_create();
}
- os_last_printout = ut_time();
-
return(TRUE);
-
}
/***********************************************************************
@@ -4066,8 +4056,10 @@ os_aio_free(void)
os_aio_array_free(os_aio_read_array);
- for (ulint i = 0; i < os_aio_n_segments; i++) {
- os_event_free(os_aio_segment_wait_events[i]);
+ if (!srv_use_native_aio) {
+ for (ulint i = 0; i < os_aio_n_segments; i++) {
+ os_event_free(os_aio_segment_wait_events[i]);
+ }
}
ut_free(os_aio_segment_wait_events);
@@ -4116,22 +4108,17 @@ os_aio_wake_all_threads_at_shutdown(void)
if (os_aio_log_array != 0) {
os_aio_array_wake_win_aio_at_shutdown(os_aio_log_array);
}
-
#elif defined(LINUX_NATIVE_AIO)
-
/* When using native AIO interface the io helper threads
wait on io_getevents with a timeout value of 500ms. At
each wake up these threads check the server status.
No need to do anything to wake them up. */
+#endif /* !WIN_ASYNC_AIO */
if (srv_use_native_aio) {
return;
}
- /* Fall through to simulated AIO handler wakeup if we are
- not using native AIO. */
-#endif /* !WIN_ASYNC_AIO */
-
/* This loop wakes up all simulated ai/o threads */
for (ulint i = 0; i < os_aio_n_segments; i++) {
@@ -4500,6 +4487,7 @@ os_aio_simulated_wake_handler_threads(void)
}
}
+#ifdef _WIN32
/**********************************************************************//**
This function can be called if one wants to post a batch of reads and
prefers an i/o-handler thread to handle them all at once later. You must
@@ -4507,15 +4495,14 @@ call os_aio_simulated_wake_handler_threads later to ensure the threads
are not left sleeping! */
UNIV_INTERN
void
-os_aio_simulated_put_read_threads_to_sleep(void)
-/*============================================*/
+os_aio_simulated_put_read_threads_to_sleep()
{
/* The idea of putting background IO threads to sleep is only for
Windows when using simulated AIO. Windows XP seems to schedule
background threads too eagerly to allow for coalescing during
readahead requests. */
-#ifdef __WIN__
+
os_aio_array_t* array;
if (srv_use_native_aio) {
@@ -4534,8 +4521,8 @@ readahead requests. */
os_event_reset(os_aio_segment_wait_events[i]);
}
}
-#endif /* __WIN__ */
}
+#endif /* _WIN32 */
#if defined(LINUX_NATIVE_AIO)
/*******************************************************************//**
@@ -5745,11 +5732,12 @@ os_aio_print(
srv_io_thread_op_info[i],
srv_io_thread_function[i]);
-#ifndef __WIN__
- if (os_aio_segment_wait_events[i]->is_set) {
+#ifndef _WIN32
+ if (!srv_use_native_aio
+ && os_aio_segment_wait_events[i]->is_set) {
fprintf(file, " ev set");
}
-#endif /* __WIN__ */
+#endif /* _WIN32 */
fprintf(file, "\n");
}
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index 95b8db0ccc8..090848a4525 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -1442,7 +1442,6 @@ page_dir_split_slot(
ulint i;
ulint n_owned;
- ut_ad(page);
ut_ad(!page_zip || page_is_comp(page));
ut_ad(slot_no > 0);
@@ -1504,7 +1503,6 @@ page_dir_balance_slot(
rec_t* old_rec;
rec_t* new_rec;
- ut_ad(page);
ut_ad(!page_zip || page_is_comp(page));
ut_ad(slot_no > 0);
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index e7d545ddb52..e8a147aab47 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -4807,8 +4807,6 @@ page_zip_parse_compress(
ulint size;
ulint trailer_size;
- ut_ad(ptr != NULL);
- ut_ad(end_ptr != NULL);
ut_ad(!page == !page_zip);
if (UNIV_UNLIKELY(ptr + (2 + 2) > end_ptr)) {
diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc
index e4a88047955..4865c8e850a 100644
--- a/storage/innobase/row/row0ftsort.cc
+++ b/storage/innobase/row/row0ftsort.cc
@@ -57,6 +57,8 @@ tokenized doc string. The index has three "fields":
integer value)
3) Word's position in original doc.
+@see fts_create_one_index_table()
+
@return dict_index_t structure for the fts sort index */
UNIV_INTERN
dict_index_t*
@@ -96,16 +98,12 @@ row_merge_create_fts_sort_index(
field->prefix_len = 0;
field->col = static_cast<dict_col_t*>(
mem_heap_alloc(new_index->heap, sizeof(dict_col_t)));
- field->col->len = FTS_MAX_WORD_LEN;
-
- if (strcmp(charset->name, "latin1_swedish_ci") == 0) {
- field->col->mtype = DATA_VARCHAR;
- } else {
- field->col->mtype = DATA_VARMYSQL;
- }
-
field->col->prtype = idx_field->col->prtype | DATA_NOT_NULL;
+ field->col->mtype = charset == &my_charset_latin1
+ ? DATA_VARCHAR : DATA_VARMYSQL;
field->col->mbminmaxlen = idx_field->col->mbminmaxlen;
+ field->col->len = HA_FT_MAXCHARLEN * DATA_MBMAXLEN(field->col->mbminmaxlen);
+
field->fixed_len = 0;
/* Doc ID */
@@ -359,6 +357,7 @@ row_fts_free_pll_merge_buf(
/*********************************************************************//**
Tokenize incoming text data and add to the sort buffer.
+@see row_merge_buf_encode()
@return TRUE if the record passed, FALSE if out of space */
static
ibool
@@ -367,8 +366,6 @@ row_merge_fts_doc_tokenize(
row_merge_buf_t** sort_buf, /*!< in/out: sort buffer */
doc_id_t doc_id, /*!< in: Doc ID */
fts_doc_t* doc, /*!< in: Doc to be tokenized */
- dtype_t* word_dtype, /*!< in: data structure for
- word col */
merge_file_t** merge_file, /*!< in/out: merge file */
ibool opt_doc_id_size,/*!< in: whether to use 4 bytes
instead of 8 bytes integer to
@@ -400,7 +397,7 @@ row_merge_fts_doc_tokenize(
ulint idx = 0;
ib_uint32_t position;
ulint offset = 0;
- ulint cur_len = 0;
+ ulint cur_len;
doc_id_t write_doc_id;
inc = innobase_mysql_fts_get_token(
@@ -454,14 +451,34 @@ row_merge_fts_doc_tokenize(
dfield_set_data(field, t_str.f_str, t_str.f_len);
len = dfield_get_len(field);
- field->type.mtype = word_dtype->mtype;
- field->type.prtype = word_dtype->prtype | DATA_NOT_NULL;
+ dict_col_copy_type(dict_index_get_nth_col(buf->index, 0), &field->type);
+ field->type.prtype |= DATA_NOT_NULL;
+ ut_ad(len <= field->type.len);
- /* Variable length field, set to max size. */
- field->type.len = FTS_MAX_WORD_LEN;
- field->type.mbminmaxlen = word_dtype->mbminmaxlen;
+ /* For the temporary file, row_merge_buf_encode() uses
+ 1 byte for representing the number of extra_size bytes.
+ This number will always be 1, because for this 3-field index
+ consisting of one variable-size column, extra_size will always
+ be 1 or 2, which can be encoded in one byte.
+
+ The extra_size is 1 byte if the length of the
+ variable-length column is less than 128 bytes or the
+ maximum length is less than 256 bytes. */
+
+ /* One variable length column, word with its lenght less than
+ fts_max_token_size, add one extra size and one extra byte.
+
+ Since the max length for FTS token now is larger than 255,
+ so we will need to signify length byte itself, so only 1 to 128
+ bytes can be used for 1 bytes, larger than that 2 bytes. */
+ if (len < 128 || field->type.len < 256) {
+ /* Extra size is one byte. */
+ cur_len = 2 + len;
+ } else {
+ /* Extra size is two bytes. */
+ cur_len = 3 + len;
+ }
- cur_len += len;
dfield_dup(field, buf->heap);
field++;
@@ -511,20 +528,6 @@ row_merge_fts_doc_tokenize(
cur_len += len;
dfield_dup(field, buf->heap);
- /* One variable length column, word with its lenght less than
- fts_max_token_size, add one extra size and one extra byte.
-
- Since the max length for FTS token now is larger than 255,
- so we will need to signify length byte itself, so only 1 to 128
- bytes can be used for 1 bytes, larger than that 2 bytes. */
- if (t_str.f_len < 128) {
- /* Extra size is one byte. */
- cur_len += 2;
- } else {
- /* Extra size is two bytes. */
- cur_len += 3;
- }
-
/* Reserve one byte for the end marker of row_merge_block_t. */
if (buf->total_size + data_size[idx] + cur_len
>= srv_sort_buf_size - 1) {
@@ -617,8 +620,6 @@ fts_parallel_tokenization(
mem_heap_t* blob_heap = NULL;
fts_doc_t doc;
dict_table_t* table = psort_info->psort_common->new_table;
- dtype_t word_dtype;
- dict_field_t* idx_field;
fts_tokenize_ctx_t t_ctx;
ulint retried = 0;
dberr_t error = DB_SUCCESS;
@@ -640,13 +641,6 @@ fts_parallel_tokenization(
doc.charset = fts_index_get_charset(
psort_info->psort_common->dup->index);
- idx_field = dict_index_get_nth_field(
- psort_info->psort_common->dup->index, 0);
- word_dtype.prtype = idx_field->col->prtype;
- word_dtype.mbminmaxlen = idx_field->col->mbminmaxlen;
- word_dtype.mtype = (strcmp(doc.charset->name, "latin1_swedish_ci") == 0)
- ? DATA_VARCHAR : DATA_VARMYSQL;
-
block = psort_info->merge_block;
zip_size = dict_table_zip_size(table);
@@ -696,7 +690,6 @@ loop:
processed = row_merge_fts_doc_tokenize(
buf, doc_item->doc_id, &doc,
- &word_dtype,
merge_file, psort_info->psort_common->opt_doc_id_size,
&t_ctx);
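
The row0ftsort.cc hunks above change how each tokenized word is sized in the sort buffer: row_merge_buf_encode() spends one byte on the extra_size count, and the length prefix for the variable-length word column needs one byte when the data length is below 128 bytes (or the column's maximum length is below 256 bytes) and two bytes otherwise. A minimal standalone sketch of that arithmetic follows; the function name and test values are invented for illustration, and this is not InnoDB code.

#include <cassert>
#include <cstdio>

/* Bytes a variable-length FTS token occupies in the sort buffer:
   one byte for the extra_size count written by row_merge_buf_encode(),
   plus a 1- or 2-byte length prefix, plus the token bytes themselves.
   (Hypothetical helper mirroring the cur_len computation above.) */
static unsigned long fts_token_sort_size(unsigned long len, unsigned long max_len)
{
    if (len < 128 || max_len < 256) {
        return 2 + len;   /* length prefix fits in one byte */
    }
    return 3 + len;       /* length prefix needs two bytes */
}

int main()
{
    assert(fts_token_sort_size(10, 84) == 12);     /* short token, narrow column */
    assert(fts_token_sort_size(200, 1000) == 203); /* long token, wide column */
    std::printf("ok\n");
    return 0;
}
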
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 9518712e160..f214633e9ce 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -944,14 +944,8 @@ row_merge_read_rec(
ulint data_size;
ulint avail_size;
- ut_ad(block);
- ut_ad(buf);
ut_ad(b >= &block[0]);
ut_ad(b < &block[srv_sort_buf_size]);
- ut_ad(index);
- ut_ad(foffs);
- ut_ad(mrec);
- ut_ad(offsets);
ut_ad(*offsets == 1 + REC_OFFS_HEADER_SIZE
+ dict_index_get_n_fields(index));
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 8d13d436ab8..720a5c049b9 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -1279,8 +1279,6 @@ row_upd_index_replace_new_col_vals_index_pos(
ulint n_fields;
const ulint zip_size = dict_table_zip_size(index->table);
- ut_ad(index);
-
dtuple_set_info_bits(entry, update->info_bits);
if (order_only) {
@@ -1465,8 +1463,6 @@ row_upd_changes_ord_field_binary_func(
ulint i;
const dict_index_t* clust_index;
- ut_ad(index);
- ut_ad(update);
ut_ad(thr);
ut_ad(thr->graph);
ut_ad(thr->graph->trx);
diff --git a/storage/innobase/srv/srv0conc.cc b/storage/innobase/srv/srv0conc.cc
index cbf81f8e8f7..f236f9eb9cc 100644
--- a/storage/innobase/srv/srv0conc.cc
+++ b/storage/innobase/srv/srv0conc.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -81,7 +82,9 @@ typedef UT_LIST_NODE_T(struct srv_conc_slot_t) srv_conc_node_t;
/** Slot for a thread waiting in the concurrency control queue. */
struct srv_conc_slot_t{
- os_event_t event; /*!< event to wait */
+ os_event_t event; /*!< event to wait for;
+ os_event_set() and os_event_reset()
+ are protected by srv_conc_mutex */
ibool reserved; /*!< TRUE if slot
reserved */
ibool wait_ended; /*!< TRUE when another thread has
@@ -375,11 +378,11 @@ srv_conc_exit_innodb_without_atomics(
}
}
- os_fast_mutex_unlock(&srv_conc_mutex);
-
if (slot != NULL) {
os_event_set(slot->event);
}
+
+ os_fast_mutex_unlock(&srv_conc_mutex);
}
/*********************************************************************//**
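
The srv0conc.cc hunk above moves os_fast_mutex_unlock() below os_event_set(), so the event is now set while srv_conc_mutex is still held, matching the new comment that os_event_set()/os_event_reset() on the slot event are protected by that mutex. A minimal sketch of the same discipline with standard C++ primitives (not the InnoDB os_event API) is below: the state change and the notification happen under one mutex, and the waiter re-checks the state under the same mutex, so a wakeup issued between its check and its wait cannot be lost.

#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex m;                // plays the role of srv_conc_mutex
std::condition_variable cv;  // plays the role of slot->event
bool wait_ended = false;     // the slot state the event announces

void waiter()
{
    std::unique_lock<std::mutex> lk(m);
    cv.wait(lk, [] { return wait_ended; });  // predicate re-checked under m: no lost wakeup
}

void waker()
{
    std::lock_guard<std::mutex> lk(m);
    wait_ended = true;   // state change and notification under the same mutex,
    cv.notify_one();     // analogous to calling os_event_set() before unlocking srv_conc_mutex
}

int main()
{
    std::thread t(waiter);
    waker();
    t.join();
    return 0;
}
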
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index ae73fe7df96..756138d9ca7 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation Ab. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -321,11 +321,6 @@ starting from SRV_FORCE_IGNORE_CORRUPT, so that data can be recovered
by SELECT or mysqldump. When this is nonzero, we do not allow any user
modifications to the data. */
UNIV_INTERN ulong srv_force_recovery;
-#ifndef DBUG_OFF
-/** Inject a crash at different steps of the recovery process.
-This is for testing and debugging only. */
-UNIV_INTERN ulong srv_force_recovery_crash;
-#endif /* !DBUG_OFF */
/** Print all user-level transactions deadlocks to mysqld stderr */
@@ -604,7 +599,11 @@ struct srv_sys_t{
ulint n_sys_threads; /*!< size of the sys_threads
array */
- srv_slot_t* sys_threads; /*!< server thread table */
+ srv_slot_t* sys_threads; /*!< server thread table;
+ os_event_set() and
+ os_event_reset() on
+ sys_threads[]->event are
+ covered by srv_sys_t::mutex */
ulint n_threads_active[SRV_MASTER + 1];
/*!< number of threads active
@@ -622,13 +621,16 @@ UNIV_INTERN ib_mutex_t server_mutex;
static srv_sys_t* srv_sys = NULL;
-/** Event to signal the monitor thread. */
+/** Event to signal srv_monitor_thread. Not protected by a mutex.
+Set after setting srv_print_innodb_monitor. */
UNIV_INTERN os_event_t srv_monitor_event;
-/** Event to signal the error thread */
+/** Event to signal the shutdown of srv_error_monitor_thread.
+Not protected by a mutex. */
UNIV_INTERN os_event_t srv_error_event;
-/** Event to signal the buffer pool dump/load thread */
+/** Event for waking up buf_dump_thread. Not protected by a mutex.
+Set on shutdown or by buf_dump_start() or buf_load_start(). */
UNIV_INTERN os_event_t srv_buf_dump_event;
/** The buffer pool dump/load file name */
@@ -789,7 +791,6 @@ srv_suspend_thread_low(
/*===================*/
srv_slot_t* slot) /*!< in/out: thread slot */
{
-
ut_ad(!srv_read_only_mode);
ut_ad(srv_sys_mutex_own());
@@ -847,34 +848,71 @@ srv_suspend_thread(
return(sig_count);
}
-/*********************************************************************//**
-Releases threads of the type given from suspension in the thread table.
-NOTE! The server mutex has to be reserved by the caller!
-@return number of threads released: this may be less than n if not
- enough threads were suspended at the moment. */
-UNIV_INTERN
-ulint
-srv_release_threads(
-/*================*/
- srv_thread_type type, /*!< in: thread type */
- ulint n) /*!< in: number of threads to release */
+/** Resume the calling thread.
+@param[in,out] slot thread slot
+@param[in] sig_count signal count (if wait)
+@param[in] wait whether to wait for the event
+@param[in] timeout_usec timeout in microseconds (0=infinite)
+@return whether the wait timed out */
+static
+bool
+srv_resume_thread(srv_slot_t* slot, ib_int64_t sig_count = 0, bool wait = true,
+ ulint timeout_usec = 0)
+{
+ bool timeout;
+
+ ut_ad(!srv_read_only_mode);
+ ut_ad(slot->in_use);
+ ut_ad(slot->suspended);
+
+ if (!wait) {
+ timeout = false;
+ } else if (timeout_usec) {
+ timeout = OS_SYNC_TIME_EXCEEDED == os_event_wait_time_low(
+ slot->event, timeout_usec, sig_count);
+ } else {
+ timeout = false;
+ os_event_wait_low(slot->event, sig_count);
+ }
+
+ srv_sys_mutex_enter();
+ ut_ad(slot->in_use);
+ ut_ad(slot->suspended);
+
+ slot->suspended = FALSE;
+ ++srv_sys->n_threads_active[slot->type];
+ srv_sys_mutex_exit();
+ return(timeout);
+}
+
+/** Ensure that a given number of threads of the type given are running
+(or are already terminated).
+@param[in] type thread type
+@param[in] n number of threads that have to run */
+void
+srv_release_threads(enum srv_thread_type type, ulint n)
{
- ulint i;
- ulint count = 0;
+ ulint running;
ut_ad(srv_thread_type_validate(type));
ut_ad(n > 0);
- srv_sys_mutex_enter();
+ do {
+ running = 0;
- for (i = 0; i < srv_sys->n_sys_threads; i++) {
- srv_slot_t* slot;
+ srv_sys_mutex_enter();
- slot = &srv_sys->sys_threads[i];
+ for (ulint i = 0; i < srv_sys->n_sys_threads; i++) {
+ srv_slot_t* slot = &srv_sys->sys_threads[i];
- if (slot->in_use
- && srv_slot_get_type(slot) == type
- && slot->suspended) {
+ if (!slot->in_use || srv_slot_get_type(slot) != type) {
+ continue;
+ } else if (!slot->suspended) {
+ if (++running >= n) {
+ break;
+ }
+ continue;
+ }
switch (type) {
case SRV_NONE:
@@ -904,21 +942,11 @@ srv_release_threads(
break;
}
- slot->suspended = FALSE;
-
- ++srv_sys->n_threads_active[type];
-
os_event_set(slot->event);
-
- if (++count == n) {
- break;
- }
}
- }
-
- srv_sys_mutex_exit();
- return(count);
+ srv_sys_mutex_exit();
+ } while (running && running < n);
}
/*********************************************************************//**
@@ -931,11 +959,8 @@ srv_free_slot(
{
srv_sys_mutex_enter();
- if (!slot->suspended) {
- /* Mark the thread as inactive. */
- srv_suspend_thread_low(slot);
- }
-
+ /* Mark the thread as inactive. */
+ srv_suspend_thread_low(slot);
/* Free the slot for reuse. */
ut_ad(slot->in_use);
slot->in_use = FALSE;
@@ -1994,15 +2019,7 @@ srv_active_wake_master_thread(void)
if (slot->in_use) {
ut_a(srv_slot_get_type(slot) == SRV_MASTER);
-
- if (slot->suspended) {
-
- slot->suspended = FALSE;
-
- ++srv_sys->n_threads_active[SRV_MASTER];
-
- os_event_set(slot->event);
- }
+ os_event_set(slot->event);
}
srv_sys_mutex_exit();
@@ -2468,7 +2485,7 @@ suspend_thread:
manual also mentions this string in several places. */
srv_main_thread_op_info = "waiting for server activity";
- os_event_wait(slot->event);
+ srv_resume_thread(slot);
if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) {
os_thread_exit(NULL);
@@ -2580,8 +2597,7 @@ DECLARE_THREAD(srv_worker_thread)(
do {
srv_suspend_thread(slot);
-
- os_event_wait(slot->event);
+ srv_resume_thread(slot);
if (srv_task_execute()) {
@@ -2717,8 +2733,6 @@ srv_purge_coordinator_suspend(
ib_int64_t sig_count = srv_suspend_thread(slot);
do {
- ulint ret;
-
rw_lock_x_lock(&purge_sys->latch);
purge_sys->running = false;
@@ -2727,32 +2741,11 @@ srv_purge_coordinator_suspend(
/* We don't wait right away on the the non-timed wait because
we want to signal the thread that wants to suspend purge. */
-
- if (stop) {
- os_event_wait_low(slot->event, sig_count);
- ret = 0;
- } else if (rseg_history_len <= trx_sys->rseg_history_len) {
- ret = os_event_wait_time_low(
- slot->event, SRV_PURGE_MAX_TIMEOUT, sig_count);
- } else {
- /* We don't want to waste time waiting, if the
- history list increased by the time we got here,
- unless purge has been stopped. */
- ret = 0;
- }
-
- srv_sys_mutex_enter();
-
- /* The thread can be in state !suspended after the timeout
- but before this check if another thread sent a wakeup signal. */
-
- if (slot->suspended) {
- slot->suspended = FALSE;
- ++srv_sys->n_threads_active[slot->type];
- ut_a(srv_sys->n_threads_active[slot->type] == 1);
- }
-
- srv_sys_mutex_exit();
+ const bool wait = stop
+ || rseg_history_len <= trx_sys->rseg_history_len;
+ const bool timeout = srv_resume_thread(
+ slot, sig_count, wait,
+ stop ? 0 : SRV_PURGE_MAX_TIMEOUT);
sig_count = srv_suspend_thread(slot);
@@ -2764,6 +2757,19 @@ srv_purge_coordinator_suspend(
if (!stop) {
ut_a(purge_sys->n_stop == 0);
purge_sys->running = true;
+
+ if (timeout
+ && rseg_history_len == trx_sys->rseg_history_len
+ && trx_sys->rseg_history_len < 5000) {
+ /* No new records were added since the
+ wait started. Simply wait for new
+ records. The magic number 5000 is an
+ approximation for the case where we
+ have cached UNDO log records which
+ prevent truncate of the UNDO
+ segments. */
+ stop = true;
+ }
} else {
ut_a(purge_sys->n_stop > 0);
@@ -2772,33 +2778,9 @@ srv_purge_coordinator_suspend(
}
rw_lock_x_unlock(&purge_sys->latch);
-
- if (ret == OS_SYNC_TIME_EXCEEDED) {
-
- /* No new records added since wait started then simply
- wait for new records. The magic number 5000 is an
- approximation for the case where we have cached UNDO
- log records which prevent truncate of the UNDO
- segments. */
-
- if (rseg_history_len == trx_sys->rseg_history_len
- && trx_sys->rseg_history_len < 5000) {
-
- stop = true;
- }
- }
-
} while (stop);
- srv_sys_mutex_enter();
-
- if (slot->suspended) {
- slot->suspended = FALSE;
- ++srv_sys->n_threads_active[slot->type];
- ut_a(srv_sys->n_threads_active[slot->type] == 1);
- }
-
- srv_sys_mutex_exit();
+ srv_resume_thread(slot, 0, false);
}
/*********************************************************************//**
@@ -2851,8 +2833,9 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
srv_purge_coordinator_suspend(slot, rseg_history_len);
}
+ ut_ad(!slot->suspended);
+
if (srv_purge_should_exit(n_total_purged)) {
- ut_a(!slot->suspended);
break;
}
@@ -2963,12 +2946,10 @@ srv_get_task_queue_length(void)
return(n_tasks);
}
-/**********************************************************************//**
-Wakeup the purge threads. */
+/** Wake up the purge threads. */
UNIV_INTERN
void
-srv_purge_wakeup(void)
-/*==================*/
+srv_purge_wakeup()
{
ut_ad(!srv_read_only_mode);
@@ -2983,4 +2964,3 @@ srv_purge_wakeup(void)
}
}
}
-
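
The srv0srv.cc changes above introduce srv_resume_thread() as the counterpart of srv_suspend_thread(): suspension records the event's signal count while the slot bookkeeping is updated under the srv_sys mutex, the wait itself happens outside the mutex against that count, and resumption re-enters the mutex to clear the suspended flag and bump n_threads_active. The sketch below reproduces that handshake with a plain generation counter standing in for os_event's sig_count; the Slot struct and the function names are hypothetical simplifications, not the InnoDB types.

#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>

struct Slot {
    std::mutex              mtx;       // stands in for srv_sys->mutex
    std::condition_variable cv;        // stands in for slot->event
    std::uint64_t           sig_count = 0;
    bool                    suspended = false;
};

// Mark the slot suspended and return the signal count seen at that moment
// (the analogue of srv_suspend_thread() returning os_event_reset()).
std::uint64_t slot_suspend(Slot& s)
{
    std::lock_guard<std::mutex> lk(s.mtx);
    s.suspended = true;
    return s.sig_count;
}

// Wait until somebody signalled after the recorded count, then mark the
// slot active again (the analogue of srv_resume_thread()).
void slot_resume(Slot& s, std::uint64_t seen)
{
    std::unique_lock<std::mutex> lk(s.mtx);
    s.cv.wait(lk, [&] { return s.sig_count != seen; });
    s.suspended = false;
}

// Wake a suspended slot; the counter bump and the notification happen under
// the mutex (the analogue of srv_release_threads() calling os_event_set()).
void slot_release(Slot& s)
{
    std::lock_guard<std::mutex> lk(s.mtx);
    ++s.sig_count;
    s.cv.notify_one();
}

int main()
{
    Slot s;
    std::uint64_t seen = slot_suspend(s);
    std::thread t([&] { slot_release(s); });
    slot_resume(s, seen);  // returns even if the release ran before the wait started
    t.join();
    return 0;
}
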
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 044a087b53c..d1e53d0e0dd 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -3,6 +3,7 @@
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2009, Percona Inc.
+Copyright (c) 2013, 2017, MariaDB Corporation
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -582,19 +583,6 @@ create_log_file(
/** Initial number of the first redo log file */
#define INIT_LOG_FILE0 (SRV_N_LOG_FILES_MAX + 1)
-#ifdef DBUG_OFF
-# define RECOVERY_CRASH(x) do {} while(0)
-#else
-# define RECOVERY_CRASH(x) do { \
- if (srv_force_recovery_crash == x) { \
- fprintf(stderr, "innodb_force_recovery_crash=%lu\n", \
- srv_force_recovery_crash); \
- fflush(stderr); \
- exit(3); \
- } \
-} while (0)
-#endif
-
/*********************************************************************//**
Creates all log files.
@return DB_SUCCESS or error code */
@@ -635,13 +623,14 @@ create_log_files(
file should be recoverable. The buffer
pool was clean, and we can simply create
all log files from the scratch. */
- RECOVERY_CRASH(6);
+ DBUG_EXECUTE_IF("innodb_log_abort_6",
+ return(DB_ERROR););
}
}
ut_ad(!buf_pool_check_no_pending_io());
- RECOVERY_CRASH(7);
+ DBUG_EXECUTE_IF("innodb_log_abort_7", return(DB_ERROR););
for (unsigned i = 0; i < srv_n_log_files; i++) {
sprintf(logfilename + dirnamelen,
@@ -654,7 +643,7 @@ create_log_files(
}
}
- RECOVERY_CRASH(8);
+ DBUG_EXECUTE_IF("innodb_log_abort_8", return(DB_ERROR););
/* We did not create the first log file initially as
ib_logfile0, so that crash recovery cannot find it until it
@@ -699,10 +688,16 @@ create_log_files(
return(DB_SUCCESS);
}
-/*********************************************************************//**
-Renames the first log file. */
+/** Rename the first redo log file.
+@param[in,out] logfilename buffer for the log file name
+@param[in] dirnamelen length of the directory path
+@param[in] lsn FIL_PAGE_FILE_FLUSH_LSN value
+@param[in,out] logfile0 name of the first log file
+@return error code
+@retval DB_SUCCESS on successful operation */
+MY_ATTRIBUTE((warn_unused_result, nonnull))
static
-void
+dberr_t
create_log_files_rename(
/*====================*/
char* logfilename, /*!< in/out: buffer for log file name */
@@ -713,6 +708,9 @@ create_log_files_rename(
/* If innodb_flush_method=O_DSYNC,
we need to explicitly flush the log buffers. */
fil_flush(SRV_LOG_SPACE_FIRST_ID);
+
+ DBUG_EXECUTE_IF("innodb_log_abort_9", return(DB_ERROR););
+
/* Close the log files, so that we can rename
the first one. */
fil_close_log_files(false);
@@ -721,26 +719,28 @@ create_log_files_rename(
checkpoint has been created. */
sprintf(logfilename + dirnamelen, "ib_logfile%u", 0);
- RECOVERY_CRASH(9);
-
ib_logf(IB_LOG_LEVEL_INFO,
"Renaming log file %s to %s", logfile0, logfilename);
mutex_enter(&log_sys->mutex);
ut_ad(strlen(logfile0) == 2 + strlen(logfilename));
- ibool success = os_file_rename(
- innodb_file_log_key, logfile0, logfilename);
- ut_a(success);
-
- RECOVERY_CRASH(10);
+ dberr_t err = os_file_rename(
+ innodb_file_log_key, logfile0, logfilename)
+ ? DB_SUCCESS : DB_ERROR;
/* Replace the first file with ib_logfile0. */
strcpy(logfile0, logfilename);
mutex_exit(&log_sys->mutex);
- fil_open_log_and_system_tablespace_files();
+ DBUG_EXECUTE_IF("innodb_log_abort_10", err = DB_ERROR;);
- ib_logf(IB_LOG_LEVEL_WARN, "New log files created, LSN=" LSN_PF, lsn);
+ if (err == DB_SUCCESS) {
+ fil_open_log_and_system_tablespace_files();
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "New log files created, LSN=" LSN_PF, lsn);
+ }
+
+ return(err);
}
/*********************************************************************//**
@@ -1554,8 +1554,6 @@ innobase_start_or_create_for_mysql(void)
ulint max_arch_log_no;
#endif /* UNIV_LOG_ARCHIVE */
ulint sum_of_new_sizes;
- ulint sum_of_data_file_sizes;
- ulint tablespace_size_in_header;
dberr_t err;
unsigned i;
ulint srv_n_log_files_found = srv_n_log_files;
@@ -1568,6 +1566,19 @@ innobase_start_or_create_for_mysql(void)
size_t dirnamelen;
bool sys_datafiles_created = false;
+ /* Check that os_fast_mutexes work as expected */
+ os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &srv_os_test_mutex);
+
+ ut_a(0 == os_fast_mutex_trylock(&srv_os_test_mutex));
+
+ os_fast_mutex_unlock(&srv_os_test_mutex);
+
+ os_fast_mutex_lock(&srv_os_test_mutex);
+
+ os_fast_mutex_unlock(&srv_os_test_mutex);
+
+ os_fast_mutex_free(&srv_os_test_mutex);
+
high_level_read_only = srv_read_only_mode
|| srv_force_recovery > SRV_FORCE_NO_TRX_UNDO;
@@ -1610,22 +1621,7 @@ innobase_start_or_create_for_mysql(void)
#endif /* PAGE_ATOMIC_REF_COUNT */
);
-
- if (sizeof(ulint) != sizeof(void*)) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: size of InnoDB's ulint is %lu, "
- "but size of void*\n", (ulong) sizeof(ulint));
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: is %lu. The sizes should be the same "
- "so that on a 64-bit\n",
- (ulong) sizeof(void*));
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: platforms you can allocate more than 4 GB "
- "of memory.\n");
- }
+ compile_time_assert(sizeof(ulint) == sizeof(void*));
#ifdef UNIV_DEBUG
ut_print_timestamp(stderr);
@@ -2203,14 +2199,18 @@ innobase_start_or_create_for_mysql(void)
dirnamelen, max_flushed_lsn,
logfile0);
+ if (err == DB_SUCCESS) {
+ err = create_log_files_rename(
+ logfilename,
+ dirnamelen,
+ max_flushed_lsn,
+ logfile0);
+ }
+
if (err != DB_SUCCESS) {
return(err);
}
- create_log_files_rename(
- logfilename, dirnamelen,
- max_flushed_lsn, logfile0);
-
/* Suppress the message about
crash recovery. */
max_flushed_lsn = min_flushed_lsn
@@ -2336,7 +2336,6 @@ files_checked:
trx_sys_create();
if (create_new_db) {
-
ut_a(!srv_read_only_mode);
mtr_start(&mtr);
@@ -2379,8 +2378,12 @@ files_checked:
fil_flush_file_spaces(FIL_TABLESPACE);
- create_log_files_rename(logfilename, dirnamelen,
- max_flushed_lsn, logfile0);
+ err = create_log_files_rename(logfilename, dirnamelen,
+ max_flushed_lsn, logfile0);
+
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
#ifdef UNIV_LOG_ARCHIVE
} else if (srv_archive_recovery) {
@@ -2450,24 +2453,89 @@ files_checked:
LOG_CHECKPOINT, LSN_MAX,
min_flushed_lsn, max_flushed_lsn);
+ if (err == DB_SUCCESS) {
+ /* Initialize the change buffer. */
+ err = dict_boot();
+ }
+
if (err != DB_SUCCESS) {
+ return(err);
+ }
- return(DB_ERROR);
+ if (!srv_read_only_mode) {
+ if (sum_of_new_sizes > 0) {
+ /* New data file(s) were added */
+ mtr_start(&mtr);
+ fsp_header_inc_size(0, sum_of_new_sizes, &mtr);
+ mtr_commit(&mtr);
+ /* Immediately write the log record about
+ increased tablespace size to disk, so that it
+ is durable even if mysqld would crash
+ quickly */
+ log_buffer_flush_to_disk();
+ }
}
- /* Since the insert buffer init is in dict_boot, and the
- insert buffer is needed in any disk i/o, first we call
- dict_boot(). Note that trx_sys_init_at_db_start() only needs
- to access space 0, and the insert buffer at this stage already
- works for space 0. */
+ const ulint tablespace_size_in_header
+ = fsp_header_get_tablespace_size();
- err = dict_boot();
+#ifdef UNIV_DEBUG
+ /* buf_debug_prints = TRUE; */
+#endif /* UNIV_DEBUG */
+ ulint sum_of_data_file_sizes = 0;
+
+ for (ulint d = 0; d < srv_n_data_files; d++) {
+ sum_of_data_file_sizes += srv_data_file_sizes[d];
+ }
+
+ /* Compare the system tablespace file size to what is
+ stored in FSP_SIZE. In open_or_create_data_files()
+ we already checked that the file sizes match the
+ innodb_data_file_path specification. */
+ if (srv_read_only_mode
+ || sum_of_data_file_sizes == tablespace_size_in_header) {
+ /* Do not complain about the size. */
+ } else if (!srv_auto_extend_last_data_file
+ || sum_of_data_file_sizes
+ < tablespace_size_in_header) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Tablespace size stored in header is " ULINTPF
+ " pages, but the sum of data file sizes is "
+ ULINTPF " pages",
+ tablespace_size_in_header,
+ sum_of_data_file_sizes);
+
+ if (srv_force_recovery == 0
+ && sum_of_data_file_sizes
+ < tablespace_size_in_header) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot start InnoDB. The tail of"
+ " the system tablespace is"
+ " missing. Have you edited"
+ " innodb_data_file_path in my.cnf"
+ " in an inappropriate way, removing"
+ " data files from there?"
+ " You can set innodb_force_recovery=1"
+ " in my.cnf to force"
+ " a startup if you are trying to"
+ " recover a badly corrupt database.");
- if (err != DB_SUCCESS) {
- return(err);
+ return(DB_ERROR);
+ }
}
+ /* This must precede recv_apply_hashed_log_recs(TRUE). */
ib_bh = trx_sys_init_at_db_start();
+
+ if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
+ /* Apply the hashed log records to the
+ respective file pages, for the last batch of
+ recv_group_scan_log_recs(). */
+
+ recv_apply_hashed_log_recs(TRUE);
+ DBUG_PRINT("ib_log", ("apply completed"));
+ }
+
n_recovered_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list);
/* The purge system needs to create the purge view and
@@ -2536,7 +2604,8 @@ files_checked:
ULINT_MAX, LSN_MAX, NULL);
ut_a(success);
- RECOVERY_CRASH(1);
+ DBUG_EXECUTE_IF("innodb_log_abort_1",
+ return(DB_ERROR););
min_flushed_lsn = max_flushed_lsn = log_get_lsn();
@@ -2551,8 +2620,6 @@ files_checked:
buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
- RECOVERY_CRASH(2);
-
/* Flush the old log files. */
log_buffer_flush_to_disk();
/* If innodb_flush_method=O_DSYNC,
@@ -2567,21 +2634,27 @@ files_checked:
ut_d(recv_no_log_write = TRUE);
ut_ad(!buf_pool_check_no_pending_io());
- RECOVERY_CRASH(3);
+ DBUG_EXECUTE_IF("innodb_log_abort_3",
+ return(DB_ERROR););
/* Stamp the LSN to the data files. */
fil_write_flushed_lsn_to_data_files(
max_flushed_lsn, 0);
- fil_flush_file_spaces(FIL_TABLESPACE);
+ DBUG_EXECUTE_IF("innodb_log_abort_4", err = DB_ERROR;);
- RECOVERY_CRASH(4);
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+
+ fil_flush_file_spaces(FIL_TABLESPACE);
/* Close and free the redo log files, so that
we can replace them. */
fil_close_log_files(true);
- RECOVERY_CRASH(5);
+ DBUG_EXECUTE_IF("innodb_log_abort_5",
+ return(DB_ERROR););
/* Free the old log file space. */
log_group_close_all();
@@ -2595,12 +2668,15 @@ files_checked:
dirnamelen, max_flushed_lsn,
logfile0);
+ if (err == DB_SUCCESS) {
+ err = create_log_files_rename(
+ logfilename, dirnamelen,
+ max_flushed_lsn, logfile0);
+ }
+
if (err != DB_SUCCESS) {
return(err);
}
-
- create_log_files_rename(logfilename, dirnamelen,
- max_flushed_lsn, logfile0);
}
srv_startup_is_before_trx_rollback_phase = FALSE;
@@ -2614,20 +2690,8 @@ files_checked:
trx_sys_file_format_tag_init();
}
- if (!create_new_db && sum_of_new_sizes > 0) {
- /* New data file(s) were added */
- mtr_start(&mtr);
-
- fsp_header_inc_size(0, sum_of_new_sizes, &mtr);
-
- mtr_commit(&mtr);
-
- /* Immediately write the log record about increased tablespace
- size to disk, so that it is durable even if mysqld would crash
- quickly */
-
- log_buffer_flush_to_disk();
- }
+ ut_ad(err == DB_SUCCESS);
+ ut_a(sum_of_new_sizes != ULINT_UNDEFINED);
#ifdef UNIV_LOG_ARCHIVE
/* Archiving is always off under MySQL */
@@ -2770,125 +2834,6 @@ files_checked:
buf_flush_page_cleaner_thread_started = true;
}
-#ifdef UNIV_DEBUG
- /* buf_debug_prints = TRUE; */
-#endif /* UNIV_DEBUG */
- sum_of_data_file_sizes = 0;
-
- for (i = 0; i < srv_n_data_files; i++) {
- sum_of_data_file_sizes += srv_data_file_sizes[i];
- }
-
- tablespace_size_in_header = fsp_header_get_tablespace_size();
-
- if (!srv_read_only_mode
- && !srv_auto_extend_last_data_file
- && sum_of_data_file_sizes != tablespace_size_in_header) {
-
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: tablespace size"
- " stored in header is %lu pages, but\n",
- (ulong) tablespace_size_in_header);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- "InnoDB: the sum of data file sizes is %lu pages\n",
- (ulong) sum_of_data_file_sizes);
-
- if (srv_force_recovery == 0
- && sum_of_data_file_sizes < tablespace_size_in_header) {
- /* This is a fatal error, the tail of a tablespace is
- missing */
-
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Cannot start InnoDB."
- " The tail of the system tablespace is\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: missing. Have you edited"
- " innodb_data_file_path in my.cnf in an\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: inappropriate way, removing"
- " ibdata files from there?\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: You can set innodb_force_recovery=1"
- " in my.cnf to force\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: a startup if you are trying"
- " to recover a badly corrupt database.\n");
-
- return(DB_ERROR);
- }
- }
-
- if (!srv_read_only_mode
- && srv_auto_extend_last_data_file
- && sum_of_data_file_sizes < tablespace_size_in_header) {
-
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: tablespace size stored in header"
- " is %lu pages, but\n",
- (ulong) tablespace_size_in_header);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: the sum of data file sizes"
- " is only %lu pages\n",
- (ulong) sum_of_data_file_sizes);
-
- if (srv_force_recovery == 0) {
-
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Cannot start InnoDB. The tail of"
- " the system tablespace is\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: missing. Have you edited"
- " innodb_data_file_path in my.cnf in an\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: inappropriate way, removing"
- " ibdata files from there?\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: You can set innodb_force_recovery=1"
- " in my.cnf to force\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: a startup if you are trying to"
- " recover a badly corrupt database.\n");
-
- return(DB_ERROR);
- }
- }
-
- /* Check that os_fast_mutexes work as expected */
- os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &srv_os_test_mutex);
-
- if (0 != os_fast_mutex_trylock(&srv_os_test_mutex)) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: pthread_mutex_trylock returns"
- " an unexpected value on\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: success! Cannot continue.\n");
- exit(1);
- }
-
- os_fast_mutex_unlock(&srv_os_test_mutex);
-
- os_fast_mutex_lock(&srv_os_test_mutex);
-
- os_fast_mutex_unlock(&srv_os_test_mutex);
-
- os_fast_mutex_free(&srv_os_test_mutex);
-
if (srv_print_verbose_log) {
ib_logf(IB_LOG_LEVEL_INFO,
"%s started; log sequence number " LSN_PF "",
diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index 2fe40109511..df360d221da 100644
--- a/storage/innobase/sync/sync0sync.cc
+++ b/storage/innobase/sync/sync0sync.cc
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -1235,7 +1236,7 @@ sync_thread_add_level(
case SYNC_TRX_UNDO_PAGE:
/* Purge is allowed to read in as many UNDO pages as it likes,
there was a bogus rule here earlier that forced the caller to
- acquire the purge_sys_t::mutex. The purge mutex did not really
+ acquire the trx_purge_t::mutex. The purge mutex did not really
protect anything because it was only ever acquired by the
single purge thread. The purge thread can read the UNDO pages
without any covering mutex. */
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index efc600d16b1..f24c8f22277 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -171,10 +172,6 @@ trx_purge_sys_close(void)
sess_close(purge_sys->sess);
- purge_sys->sess = NULL;
-
- purge_sys->view = NULL;
-
rw_lock_free(&purge_sys->latch);
mutex_free(&purge_sys->bh_mutex);
@@ -183,9 +180,6 @@ trx_purge_sys_close(void)
ib_bh_free(purge_sys->ib_bh);
os_event_free(purge_sys->event);
-
- purge_sys->event = NULL;
-
mem_free(purge_sys);
purge_sys = NULL;
@@ -1301,20 +1295,16 @@ void
trx_purge_stop(void)
/*================*/
{
- purge_state_t state;
- ib_int64_t sig_count = os_event_reset(purge_sys->event);
-
ut_a(srv_n_purge_threads > 0);
rw_lock_x_lock(&purge_sys->latch);
- ut_a(purge_sys->state != PURGE_STATE_INIT);
- ut_a(purge_sys->state != PURGE_STATE_EXIT);
- ut_a(purge_sys->state != PURGE_STATE_DISABLED);
+ const ib_int64_t sig_count = os_event_reset(purge_sys->event);
+ const purge_state_t state = purge_sys->state;
- ++purge_sys->n_stop;
+ ut_a(state == PURGE_STATE_RUN || state == PURGE_STATE_STOP);
- state = purge_sys->state;
+ ++purge_sys->n_stop;
if (state == PURGE_STATE_RUN) {
ib_logf(IB_LOG_LEVEL_INFO, "Stopping purge");
@@ -1327,33 +1317,29 @@ trx_purge_stop(void)
purge_sys->state = PURGE_STATE_STOP;
- rw_lock_x_unlock(&purge_sys->latch);
-
if (state != PURGE_STATE_STOP) {
-
+ rw_lock_x_unlock(&purge_sys->latch);
/* Wait for purge coordinator to signal that it
is suspended. */
os_event_wait_low(purge_sys->event, sig_count);
- } else {
- bool once = true;
-
- rw_lock_x_lock(&purge_sys->latch);
+ } else {
+ bool once = true;
- /* Wait for purge to signal that it has actually stopped. */
- while (purge_sys->running) {
+ /* Wait for purge to signal that it has actually stopped. */
+ while (purge_sys->running) {
- if (once) {
+ if (once) {
ib_logf(IB_LOG_LEVEL_INFO,
"Waiting for purge to stop");
- once = false;
+ once = false;
}
rw_lock_x_unlock(&purge_sys->latch);
- os_thread_sleep(10000);
+ os_thread_sleep(10000);
rw_lock_x_lock(&purge_sys->latch);
- }
+ }
rw_lock_x_unlock(&purge_sys->latch);
}
diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc
index 8ff9180eaf2..67ad96425b1 100644
--- a/storage/innobase/trx/trx0sys.cc
+++ b/storage/innobase/trx/trx0sys.cc
@@ -1324,7 +1324,9 @@ trx_sys_close(void)
ut_a(UT_LIST_GET_LEN(trx_sys->ro_trx_list) == 0);
/* Only prepared transactions may be left in the system. Free them. */
- ut_a(UT_LIST_GET_LEN(trx_sys->rw_trx_list) == trx_sys->n_prepared_trx);
+ ut_a(UT_LIST_GET_LEN(trx_sys->rw_trx_list) == trx_sys->n_prepared_trx
+ || srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
while ((trx = UT_LIST_GET_FIRST(trx_sys->rw_trx_list)) != NULL) {
trx_free_prepared(trx);
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 3b39f685e6d..bfc1c546510 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -310,7 +310,11 @@ trx_free_prepared(
/*==============*/
trx_t* trx) /*!< in, own: trx object */
{
- ut_a(trx_state_eq(trx, TRX_STATE_PREPARED));
+ ut_a(trx_state_eq(trx, TRX_STATE_PREPARED)
+ || (trx_state_eq(trx, TRX_STATE_ACTIVE)
+ && trx->is_recovered
+ && (srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO)));
ut_a(trx->magic_n == TRX_MAGIC_N);
lock_trx_release_locks(trx);
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index cdd23726f2e..220589dd9ff 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2011,13 +2012,37 @@ trx_undo_free_prepared(
ut_ad(srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS);
if (trx->update_undo) {
- ut_a(trx->update_undo->state == TRX_UNDO_PREPARED);
+ switch (trx->update_undo->state) {
+ case TRX_UNDO_PREPARED:
+ break;
+ case TRX_UNDO_ACTIVE:
+ /* lock_trx_release_locks() assigns
+ trx->is_recovered=false */
+ ut_a(srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
+ break;
+ default:
+ ut_error;
+ }
+
UT_LIST_REMOVE(undo_list, trx->rseg->update_undo_list,
trx->update_undo);
trx_undo_mem_free(trx->update_undo);
}
if (trx->insert_undo) {
- ut_a(trx->insert_undo->state == TRX_UNDO_PREPARED);
+ switch (trx->insert_undo->state) {
+ case TRX_UNDO_PREPARED:
+ break;
+ case TRX_UNDO_ACTIVE:
+ /* lock_trx_release_locks() assigns
+ trx->is_recovered=false */
+ ut_a(srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
+ break;
+ default:
+ ut_error;
+ }
+
UT_LIST_REMOVE(undo_list, trx->rseg->insert_undo_list,
trx->insert_undo);
trx_undo_mem_free(trx->insert_undo);
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index 14ab6ec7599..3e0872b9a89 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -3609,10 +3609,6 @@ static int ha_maria_init(void *p)
maria_pagecache->extra_debug= 1;
maria_assert_if_crashed_table= debug_assert_if_crashed_table;
-#if defined(HAVE_REALPATH) && !defined(HAVE_valgrind) && !defined(HAVE_BROKEN_REALPATH)
- /* We can only test for sub paths if my_symlink.c is using realpath */
- maria_test_invalid_symlink= test_if_data_home_dir;
-#endif
if (res)
maria_hton= 0;
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index a189af40362..bd5c67c0409 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -2839,7 +2839,7 @@ int maria_repair(HA_CHECK *param, register MARIA_HA *info,
(param->testflag & T_BACKUP_DATA ?
MYF(MY_REDEL_MAKE_BACKUP): MYF(0)) |
sync_dir) ||
- _ma_open_datafile(info, share, NullS, -1))
+ _ma_open_datafile(info, share))
{
goto err;
}
@@ -3994,7 +3994,7 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info,
(param->testflag & T_BACKUP_DATA ?
MYF(MY_REDEL_MAKE_BACKUP): MYF(0)) |
sync_dir) ||
- _ma_open_datafile(info, share, NullS, -1))
+ _ma_open_datafile(info, share))
{
_ma_check_print_error(param, "Couldn't change to new data file");
goto err;
@@ -4627,7 +4627,7 @@ err:
MYF((param->testflag & T_BACKUP_DATA ?
MY_REDEL_MAKE_BACKUP : 0) |
sync_dir)) ||
- _ma_open_datafile(info,share, NullS, -1))
+ _ma_open_datafile(info,share))
got_error=1;
}
}
diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c
index 1176b2037b5..14809636616 100644
--- a/storage/maria/ma_create.c
+++ b/storage/maria/ma_create.c
@@ -53,7 +53,8 @@ int maria_create(const char *name, enum data_file_type datafile_type,
uint max_field_lengths, extra_header_size, column_nr;
uint internal_table= flags & HA_CREATE_INTERNAL_TABLE;
ulong reclength, real_reclength,min_pack_length;
- char filename[FN_REFLEN], linkname[FN_REFLEN], *linkname_ptr;
+ char kfilename[FN_REFLEN], klinkname[FN_REFLEN], *klinkname_ptr;
+ char dfilename[FN_REFLEN], dlinkname[FN_REFLEN], *dlinkname_ptr;
ulong pack_reclength;
ulonglong tot_length,max_rows, tmp;
enum en_fieldtype type;
@@ -808,19 +809,19 @@ int maria_create(const char *name, enum data_file_type datafile_type,
/* chop off the table name, tempory tables use generated name */
if ((path= strrchr(ci->index_file_name, FN_LIBCHAR)))
*path= '\0';
- fn_format(filename, name, ci->index_file_name, MARIA_NAME_IEXT,
+ fn_format(kfilename, name, ci->index_file_name, MARIA_NAME_IEXT,
MY_REPLACE_DIR | MY_UNPACK_FILENAME |
MY_RETURN_REAL_PATH | MY_APPEND_EXT);
}
else
{
- fn_format(filename, ci->index_file_name, "", MARIA_NAME_IEXT,
+ fn_format(kfilename, ci->index_file_name, "", MARIA_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
}
- fn_format(linkname, name, "", MARIA_NAME_IEXT,
+ fn_format(klinkname, name, "", MARIA_NAME_IEXT,
MY_UNPACK_FILENAME|MY_APPEND_EXT);
- linkname_ptr= linkname;
+ klinkname_ptr= klinkname;
/*
Don't create the table if the link or file exists to ensure that one
doesn't accidently destroy another table.
@@ -834,10 +835,10 @@ int maria_create(const char *name, enum data_file_type datafile_type,
{
char *iext= strrchr(name, '.');
int have_iext= iext && !strcmp(iext, MARIA_NAME_IEXT);
- fn_format(filename, name, "", MARIA_NAME_IEXT,
+ fn_format(kfilename, name, "", MARIA_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
- linkname_ptr= NullS;
+ klinkname_ptr= NullS;
/*
Replace the current file.
Don't sync dir now if the data file has the same path.
@@ -857,7 +858,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
NOTE: The filename is compared against unique_file_name of every
open table. Hence we need a real path here.
*/
- if (!internal_table && _ma_test_if_reopen(filename))
+ if (!internal_table && _ma_test_if_reopen(kfilename))
{
my_printf_error(HA_ERR_TABLE_EXIST, "Aria table '%s' is in use "
"(most likely by a MERGE table). Try FLUSH TABLES.",
@@ -866,8 +867,8 @@ int maria_create(const char *name, enum data_file_type datafile_type,
goto err;
}
- if ((file= mysql_file_create_with_symlink(key_file_kfile, linkname_ptr,
- filename, 0, create_mode,
+ if ((file= mysql_file_create_with_symlink(key_file_kfile, klinkname_ptr,
+ kfilename, 0, create_mode,
MYF(MY_WME|create_flag))) < 0)
goto err;
errpos=1;
@@ -1121,30 +1122,30 @@ int maria_create(const char *name, enum data_file_type datafile_type,
/* chop off the table name, tempory tables use generated name */
if ((path= strrchr(ci->data_file_name, FN_LIBCHAR)))
*path= '\0';
- fn_format(filename, name, ci->data_file_name, MARIA_NAME_DEXT,
+ fn_format(dfilename, name, ci->data_file_name, MARIA_NAME_DEXT,
MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_APPEND_EXT);
}
else
{
- fn_format(filename, ci->data_file_name, "", MARIA_NAME_DEXT,
+ fn_format(dfilename, ci->data_file_name, "", MARIA_NAME_DEXT,
MY_UNPACK_FILENAME |
(have_dext ? MY_REPLACE_EXT : MY_APPEND_EXT));
}
- fn_format(linkname, name, "",MARIA_NAME_DEXT,
+ fn_format(dlinkname, name, "",MARIA_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
- linkname_ptr= linkname;
+ dlinkname_ptr= dlinkname;
create_flag=0;
}
else
{
- fn_format(filename,name,"", MARIA_NAME_DEXT,
+ fn_format(dfilename,name,"", MARIA_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
- linkname_ptr= NullS;
+ dlinkname_ptr= NullS;
create_flag= (flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD;
}
if ((dfile=
- mysql_file_create_with_symlink(key_file_dfile, linkname_ptr,
- filename, 0, create_mode,
+ mysql_file_create_with_symlink(key_file_dfile, dlinkname_ptr,
+ dfilename, 0, create_mode,
MYF(MY_WME | create_flag | sync_dir))) < 0)
goto err;
errpos=3;
@@ -1194,19 +1195,21 @@ err_no_lock:
mysql_file_close(dfile, MYF(0));
/* fall through */
case 2:
- if (! (flags & HA_DONT_TOUCH_DATA))
- mysql_file_delete_with_symlink(key_file_dfile,
- fn_format(filename,name,"",MARIA_NAME_DEXT,
- MY_UNPACK_FILENAME | MY_APPEND_EXT),
- sync_dir);
+ if (! (flags & HA_DONT_TOUCH_DATA))
+ {
+ mysql_file_delete(key_file_dfile, dfilename, MYF(sync_dir));
+ if (dlinkname_ptr)
+ mysql_file_delete(key_file_dfile, dlinkname_ptr, MYF(sync_dir));
+ }
/* fall through */
case 1:
mysql_file_close(file, MYF(0));
if (! (flags & HA_DONT_TOUCH_DATA))
- mysql_file_delete_with_symlink(key_file_kfile,
- fn_format(filename,name,"",MARIA_NAME_IEXT,
- MY_UNPACK_FILENAME | MY_APPEND_EXT),
- sync_dir);
+ {
+ mysql_file_delete(key_file_kfile, kfilename, MYF(sync_dir));
+ if (klinkname_ptr)
+ mysql_file_delete(key_file_kfile, klinkname_ptr, MYF(sync_dir));
+ }
}
my_free(log_data);
my_free(rec_per_key_part);
diff --git a/storage/maria/ma_delete_table.c b/storage/maria/ma_delete_table.c
index 56a6dfc8e5f..a9ab8e596cc 100644
--- a/storage/maria/ma_delete_table.c
+++ b/storage/maria/ma_delete_table.c
@@ -84,22 +84,13 @@ int maria_delete_table(const char *name)
int maria_delete_table_files(const char *name, myf sync_dir)
{
- char from[FN_REFLEN];
DBUG_ENTER("maria_delete_table_files");
- fn_format(from,name,"",MARIA_NAME_IEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT);
- if (mysql_file_delete_with_symlink(key_file_kfile, from,
- MYF(MY_WME | sync_dir)))
- DBUG_RETURN(my_errno);
- fn_format(from,name,"",MARIA_NAME_DEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT);
- if (mysql_file_delete_with_symlink(key_file_dfile, from,
- MYF(MY_WME | sync_dir)))
+ if (my_handler_delete_with_symlink(key_file_kfile, name, MARIA_NAME_IEXT, MYF(MY_WME | sync_dir)) ||
+ my_handler_delete_with_symlink(key_file_dfile, name, MARIA_NAME_DEXT, MYF(MY_WME | sync_dir)))
DBUG_RETURN(my_errno);
- // optional files from maria_pack:
- fn_format(from,name,"",".TMD",MY_UNPACK_FILENAME|MY_APPEND_EXT);
- mysql_file_delete_with_symlink(key_file_dfile, from, MYF(0));
- fn_format(from,name,"",".OLD",MY_UNPACK_FILENAME|MY_APPEND_EXT);
- mysql_file_delete_with_symlink(key_file_dfile, from, MYF(0));
+ my_handler_delete_with_symlink(key_file_dfile, name, ".TMD", MYF(0));
+ my_handler_delete_with_symlink(key_file_dfile, name, ".OLD", MYF(0));
DBUG_RETURN(0);
}
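
maria_delete_table_files() above now goes through my_handler_delete_with_symlink() for every name it builds: removal of the .MAI index file and .MAD data file must succeed (MY_WME, error returned), while the .TMD and .OLD leftovers from maria_pack are deleted on a best-effort basis. A small sketch of that required-versus-optional split, using plain std::remove() instead of the mysys helper and ignoring symlinks entirely:

#include <cstdio>
#include <string>

// Best-effort delete of "<name><ext>"; true when the file was removed.
// (Hypothetical stand-in for my_handler_delete_with_symlink().)
static bool delete_with_ext(const std::string& name, const char* ext)
{
    return std::remove((name + ext).c_str()) == 0;
}

static int delete_table_files(const std::string& name)
{
    // The index and data files are mandatory: failing to remove them is an error.
    if (!delete_with_ext(name, ".MAI") || !delete_with_ext(name, ".MAD"))
        return 1;
    // Leftovers from maria_pack are optional: failures are ignored.
    delete_with_ext(name, ".TMD");
    delete_with_ext(name, ".OLD");
    return 0;
}

int main()
{
    // Usage example; returns 1 here unless t1.MAI and t1.MAD actually exist.
    return delete_table_files("t1");
}
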
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 1d274d796be..1db85180fcf 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -86,7 +86,7 @@ MARIA_HA *_ma_test_if_reopen(const char *filename)
*/
-static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name,
+static MARIA_HA *maria_clone_internal(MARIA_SHARE *share,
int mode, File data_file,
uint internal_table)
{
@@ -106,7 +106,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name,
}
if (data_file >= 0)
info.dfile.file= data_file;
- else if (_ma_open_datafile(&info, share, name, -1))
+ else if (_ma_open_datafile(&info, share))
goto err;
errpos= 5;
@@ -252,7 +252,7 @@ MARIA_HA *maria_clone(MARIA_SHARE *share, int mode)
{
MARIA_HA *new_info;
mysql_mutex_lock(&THR_LOCK_maria);
- new_info= maria_clone_internal(share, NullS, mode,
+ new_info= maria_clone_internal(share, mode,
share->data_file_type == BLOCK_RECORD ?
share->bitmap.file.file : -1, 0);
mysql_mutex_unlock(&THR_LOCK_maria);
@@ -298,8 +298,13 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
realpath_err= my_realpath(name_buff, fn_format(org_name, name, "",
MARIA_NAME_IEXT,
MY_UNPACK_FILENAME),MYF(0));
+ if (realpath_err > 0) /* File not found, no point in looking further. */
+ {
+ DBUG_RETURN(NULL);
+ }
+
if (my_is_symlink(org_name) &&
- (realpath_err || (*maria_test_invalid_symlink)(name_buff)))
+ (realpath_err || mysys_test_invalid_symlink(name_buff)))
{
my_errno= HA_WRONG_CREATE_OPTION;
DBUG_RETURN(0);
@@ -324,13 +329,16 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
my_errno= HA_ERR_CRASHED;
goto err;
});
+ DEBUG_SYNC_C("mi_open_kfile");
if ((kfile=mysql_file_open(key_file_kfile, name_buff,
- (open_mode=O_RDWR) | O_SHARE,MYF(0))) < 0)
+ (open_mode=O_RDWR) | O_SHARE | O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS))) < 0)
{
if ((errno != EROFS && errno != EACCES) ||
mode != O_RDONLY ||
(kfile=mysql_file_open(key_file_kfile, name_buff,
- (open_mode=O_RDONLY) | O_SHARE,MYF(0))) < 0)
+ (open_mode=O_RDONLY) | O_SHARE | O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS))) < 0)
goto err;
}
share->mode=open_mode;
@@ -375,7 +383,18 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
(void) strmov(index_name, org_name);
*strrchr(org_name, FN_EXTCHAR)= '\0';
(void) fn_format(data_name,org_name,"",MARIA_NAME_DEXT,
- MY_APPEND_EXT|MY_UNPACK_FILENAME|MY_RESOLVE_SYMLINKS);
+ MY_APPEND_EXT|MY_UNPACK_FILENAME);
+ if (my_is_symlink(data_name))
+ {
+ if (my_realpath(data_name, data_name, MYF(0)))
+ goto err;
+ if (mysys_test_invalid_symlink(data_name))
+ {
+ my_errno= HA_WRONG_CREATE_OPTION;
+ goto err;
+ }
+ share->mode|= O_NOFOLLOW; /* all symlinks are resolved by realpath() */
+ }
info_length=mi_uint2korr(share->state.header.header_length);
base_pos= mi_uint2korr(share->state.header.base_pos);
@@ -832,7 +851,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
if ((share->data_file_type == BLOCK_RECORD ||
share->data_file_type == COMPRESSED_RECORD))
{
- if (_ma_open_datafile(&info, share, name, -1))
+ if (_ma_open_datafile(&info, share))
goto err;
data_file= info.dfile.file;
}
@@ -1004,7 +1023,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
data_file= share->bitmap.file.file; /* Only opened once */
}
- if (!(m_info= maria_clone_internal(share, name, mode, data_file,
+ if (!(m_info= maria_clone_internal(share, mode, data_file,
internal_table)))
goto err;
@@ -1878,35 +1897,15 @@ void _ma_set_index_pagecache_callbacks(PAGECACHE_FILE *file,
Open data file
We can't use dup() here as the data file descriptors need to have different
active seek-positions.
-
- The argument file_to_dup is here for the future if there would on some OS
- exist a dup()-like call that would give us two different file descriptors.
*************************************************************************/
-int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share, const char *org_name,
- File file_to_dup __attribute__((unused)))
+int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share)
{
- char *data_name= share->data_file_name.str;
- char real_data_name[FN_REFLEN];
-
- if (org_name)
- {
- fn_format(real_data_name, org_name, "", MARIA_NAME_DEXT, 4);
- if (my_is_symlink(real_data_name))
- {
- if (my_realpath(real_data_name, real_data_name, MYF(0)) ||
- (*maria_test_invalid_symlink)(real_data_name))
- {
- my_errno= HA_WRONG_CREATE_OPTION;
- return 1;
- }
- data_name= real_data_name;
- }
- }
-
+ myf flags= MY_WME | (share->mode & O_NOFOLLOW ? MY_NOSYMLINKS : 0);
+ DEBUG_SYNC_C("mi_open_datafile");
info->dfile.file= share->bitmap.file.file=
- mysql_file_open(key_file_dfile, data_name,
- share->mode | O_SHARE, MYF(MY_WME));
+ mysql_file_open(key_file_dfile, share->data_file_name.str,
+ share->mode | O_SHARE, MYF(flags));
return info->dfile.file >= 0 ? 0 : 1;
}
@@ -1920,8 +1919,8 @@ int _ma_open_keyfile(MARIA_SHARE *share)
mysql_mutex_lock(&share->intern_lock);
share->kfile.file= mysql_file_open(key_file_kfile,
share->unique_file_name.str,
- share->mode | O_SHARE,
- MYF(MY_WME));
+ share->mode | O_SHARE | O_NOFOLLOW,
+ MYF(MY_WME | MY_NOSYMLINKS));
mysql_mutex_unlock(&share->intern_lock);
return (share->kfile.file < 0);
}
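
The ma_open.c hunks above harden symlink handling: a data-file name is only trusted after my_is_symlink(), my_realpath() and mysys_test_invalid_symlink() agree the link points somewhere acceptable, and the key and data files are then opened with O_NOFOLLOW plus MY_NOSYMLINKS so an unexpected link is refused outright. The sketch below shows the same pattern with plain POSIX calls; validate_target() is a placeholder policy, not the mysys check, and the whole thing is an illustration rather than Aria code.

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <limits.h>
#include <sys/stat.h>
#include <unistd.h>

// Placeholder policy: the real mysys_test_invalid_symlink() rejects targets
// inside the data directory; this sketch only rejects /etc for illustration.
static bool validate_target(const char* resolved)
{
    return std::strncmp(resolved, "/etc/", 5) != 0;
}

// Open a table file without silently following an unexpected symlink.
static int open_datafile(const char* path)
{
    struct stat st;
    if (lstat(path, &st) == 0 && S_ISLNK(st.st_mode)) {
        char resolved[PATH_MAX];
        if (realpath(path, resolved) == nullptr)
            return -1;                    // dangling or unreadable link
        if (!validate_target(resolved)) {
            errno = EPERM;                // analogue of HA_WRONG_CREATE_OPTION
            return -1;
        }
        // Link resolved and vetted; O_NOFOLLOW still guards against a re-link race.
        return open(resolved, O_RDWR | O_NOFOLLOW);
    }
    return open(path, O_RDWR | O_NOFOLLOW);  // otherwise refuse to follow links at all
}

int main(int argc, char** argv)
{
    int fd = open_datafile(argc > 1 ? argv[1] : "example.MAD");
    if (fd < 0) {
        std::perror("open_datafile");
        return 1;
    }
    close(fd);
    return 0;
}
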
diff --git a/storage/maria/ma_static.c b/storage/maria/ma_static.c
index 35ad7d5a96a..3e4cf3249a8 100644
--- a/storage/maria/ma_static.c
+++ b/storage/maria/ma_static.c
@@ -106,12 +106,6 @@ uint32 maria_readnext_vec[]=
SEARCH_BIGGER, SEARCH_SMALLER, SEARCH_SMALLER
};
-static int always_valid(const char *filename __attribute__((unused)))
-{
- return 0;
-}
-
-int (*maria_test_invalid_symlink)(const char *filename)= always_valid;
my_bool (*ma_killed)(MARIA_HA *)= ma_killed_standalone;
#ifdef HAVE_PSI_INTERFACE
diff --git a/storage/maria/maria_chk.c b/storage/maria/maria_chk.c
index c9d38400bc4..0cdbe28f86b 100644
--- a/storage/maria/maria_chk.c
+++ b/storage/maria/maria_chk.c
@@ -1275,7 +1275,7 @@ static int maria_chk(HA_CHECK *param, char *filename)
mysql_file_close(info->dfile.file, MYF(MY_WME)); /* Close new file */
error|=maria_change_to_newfile(filename,MARIA_NAME_DEXT,DATA_TMP_EXT,
0, MYF(0));
- if (_ma_open_datafile(info,info->s, NullS, -1))
+ if (_ma_open_datafile(info, info->s))
error=1;
param->out_flag&= ~O_NEW_DATA; /* We are using new datafile */
param->read_cache.file= info->dfile.file;
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index a1fff2e0e43..52d1033ab66 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -1312,8 +1312,7 @@ int _ma_def_scan_restore_pos(MARIA_HA *info, MARIA_RECORD_POS lastpos);
extern MARIA_HA *_ma_test_if_reopen(const char *filename);
my_bool _ma_check_table_is_closed(const char *name, const char *where);
-int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share, const char *org_name,
- File file_to_dup);
+int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share);
int _ma_open_keyfile(MARIA_SHARE *share);
void _ma_setup_functions(register MARIA_SHARE *share);
my_bool _ma_dynmap_file(MARIA_HA *info, my_off_t size);
diff --git a/storage/mroonga/vendor/groonga/lib/CMakeLists.txt b/storage/mroonga/vendor/groonga/lib/CMakeLists.txt
index 8959f883ca3..ef3e13e236d 100644
--- a/storage/mroonga/vendor/groonga/lib/CMakeLists.txt
+++ b/storage/mroonga/vendor/groonga/lib/CMakeLists.txt
@@ -22,8 +22,14 @@ include_directories(
${ONIGMO_INCLUDE_DIRS}
${MRUBY_INCLUDE_DIRS}
${LIBLZ4_INCLUDE_DIRS})
-link_directories(
- ${LIBLZ4_LIBRARY_DIRS})
+if (LIBLZ4_LIBRARY_DIRS)
+ find_library(LZ4_LIBS
+ NAMES ${LIBLZ4_LIBRARIES}
+ PATHS ${LIBLZ4_LIBRARY_DIRS}
+ NO_DEFAULT_PATH)
+else()
+ set(LZ4_LIBS ${LIBLZ4_LIBRARIES})
+endif()
read_file_list(${CMAKE_CURRENT_SOURCE_DIR}/sources.am LIBGROONGA_SOURCES)
read_file_list(${CMAKE_CURRENT_SOURCE_DIR}/dat/sources.am LIBGRNDAT_SOURCES)
@@ -60,7 +66,7 @@ set(GRN_ALL_LIBRARIES
${RT_LIBS}
${PTHREAD_LIBS}
${Z_LIBS}
- ${LIBLZ4_LIBRARIES}
+ ${LZ4_LIBS}
${DL_LIBS}
${M_LIBS}
${WS2_32_LIBS}
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index dac01b8af89..2b70518c8fd 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -1896,15 +1896,22 @@ int ha_myisam::info(uint flag)
Set data_file_name and index_file_name to point at the symlink value
if table is symlinked (Ie; Real name is not same as generated name)
*/
+ char buf[FN_REFLEN];
data_file_name= index_file_name= 0;
fn_format(name_buff, file->filename, "", MI_NAME_DEXT,
MY_APPEND_EXT | MY_UNPACK_FILENAME);
- if (strcmp(name_buff, misam_info.data_file_name))
- data_file_name=misam_info.data_file_name;
+ if (my_is_symlink(name_buff))
+ {
+ my_readlink(buf, name_buff, MYF(0));
+ data_file_name= ha_thd()->strdup(buf);
+ }
fn_format(name_buff, file->filename, "", MI_NAME_IEXT,
MY_APPEND_EXT | MY_UNPACK_FILENAME);
- if (strcmp(name_buff, misam_info.index_file_name))
- index_file_name=misam_info.index_file_name;
+ if (my_is_symlink(name_buff))
+ {
+ my_readlink(buf, name_buff, MYF(0));
+ index_file_name= ha_thd()->strdup(buf);
+ }
}
if (flag & HA_STATUS_ERRKEY)
{
@@ -2179,6 +2186,74 @@ uint ha_myisam::checksum() const
}
+enum_alter_inplace_result
+ha_myisam::check_if_supported_inplace_alter(TABLE *new_table,
+ Alter_inplace_info *alter_info)
+{
+ DBUG_ENTER("ha_myisam::check_if_supported_inplace_alter");
+
+ const uint readd_index= Alter_inplace_info::ADD_INDEX |
+ Alter_inplace_info::DROP_INDEX;
+ const uint readd_unique= Alter_inplace_info::ADD_UNIQUE_INDEX |
+ Alter_inplace_info::DROP_UNIQUE_INDEX;
+ const uint readd_pk= Alter_inplace_info::ADD_PK_INDEX |
+ Alter_inplace_info::DROP_PK_INDEX;
+
+ const uint op= alter_info->handler_flags;
+
+ /*
+ ha_myisam::open() updates table->key_info->block_size to be the actual
+ MYI index block size, overwriting user-specified value (if any).
+ So the server cannot reliably detect whether ALTER TABLE changes
+ key_block_size or not; it might think the block size was changed
+ when it wasn't, and in that case it would recreate (drop+add)
+ the index unnecessarily. Fix it.
+ */
+
+ if (table->s->keys == new_table->s->keys &&
+ ((op & readd_pk) == readd_pk ||
+ (op & readd_unique) == readd_unique ||
+ (op & readd_index) == readd_index))
+ {
+ for (uint i=0; i < table->s->keys; i++)
+ {
+ KEY *old_key= table->key_info + i;
+ KEY *new_key= new_table->key_info + i;
+
+ if (old_key->block_size == new_key->block_size)
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); // must differ somewhere else
+
+ if (new_key->block_size && new_key->block_size != old_key->block_size)
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); // really changed
+
+ /* any difference besides the block_size, and we give up */
+ if (old_key->key_length != new_key->key_length ||
+ old_key->flags != new_key->flags ||
+ old_key->user_defined_key_parts != new_key->user_defined_key_parts ||
+ old_key->algorithm != new_key->algorithm ||
+ strcmp(old_key->name, new_key->name))
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+
+ for (uint j= 0; j < old_key->user_defined_key_parts; j++)
+ {
+ KEY_PART_INFO *old_kp= old_key->key_part + j;
+ KEY_PART_INFO *new_kp= new_key->key_part + j;
+ if (old_kp->offset != new_kp->offset ||
+ old_kp->null_offset != new_kp->null_offset ||
+ old_kp->length != new_kp->length ||
+ old_kp->fieldnr != new_kp->fieldnr ||
+ old_kp->key_part_flag != new_kp->key_part_flag ||
+ old_kp->type != new_kp->type ||
+ old_kp->null_bit != new_kp->null_bit)
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+ }
+ alter_info->handler_flags &= ~(readd_pk | readd_unique | readd_index);
+ }
+ DBUG_RETURN(handler::check_if_supported_inplace_alter(new_table, alter_info));
+}
+
+
bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
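
The new ha_myisam::check_if_supported_inplace_alter() above clears the drop-and-re-add index flags only when every old and new key definition is identical apart from a block_size difference that exists solely because ha_myisam::open() rewrote key_info->block_size (the hunk treats a zero block_size in the new definition as "not explicitly requested"). A compressed sketch of that "equal except for the reported block_size" test follows; the Key struct is a stand-in for the server's KEY/KEY_PART_INFO, reduced to the handful of fields needed here.

#include <cassert>
#include <string>
#include <vector>

// Minimal stand-in for KEY: only the attributes the comparison cares about.
struct Key {
    std::string           name;
    unsigned              block_size;
    unsigned              key_length;
    unsigned              flags;
    std::vector<unsigned> part_fieldnr;  // one entry per key part
};

// True when old_key and new_key differ only in a block_size that was not
// explicitly changed (mirroring the two early-out checks in the hunk above).
static bool differs_only_in_reported_block_size(const Key& old_key, const Key& new_key)
{
    if (old_key.block_size == new_key.block_size)
        return false;  // the ALTER flag must have another cause
    if (new_key.block_size != 0 && new_key.block_size != old_key.block_size)
        return false;  // a real, user-requested block size change
    return old_key.name == new_key.name
        && old_key.key_length == new_key.key_length
        && old_key.flags == new_key.flags
        && old_key.part_fieldnr == new_key.part_fieldnr;
}

int main()
{
    Key old_k{"k1", 8192, 10, 0, {3}};
    Key new_k{"k1", 0,    10, 0, {3}};
    assert(differs_only_in_reported_block_size(old_k, new_k));  // only the reported size differs
    new_k.block_size = 4096;                                    // an explicit, real change
    assert(!differs_only_in_reported_block_size(old_k, new_k));
    return 0;
}
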
diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h
index 63fb0ea5a2a..b132c955795 100644
--- a/storage/myisam/ha_myisam.h
+++ b/storage/myisam/ha_myisam.h
@@ -138,6 +138,8 @@ class ha_myisam: public handler
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt);
int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
+ enum_alter_inplace_result check_if_supported_inplace_alter(TABLE *new_table,
+ Alter_inplace_info *alter_info);
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
#ifdef HAVE_QUERY_CACHE
my_bool register_query_cache_table(THD *thd, char *table_key,
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index ec72e0b430a..bbe45aa3c26 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -80,8 +80,7 @@ static SORT_KEY_BLOCKS *alloc_key_blocks(HA_CHECK *param, uint blocks,
uint buffer_length);
static ha_checksum mi_byte_checksum(const uchar *buf, uint length);
static void set_data_file_type(MI_SORT_INFO *sort_info, MYISAM_SHARE *share);
-static int replace_data_file(HA_CHECK *param, MI_INFO *info,
- const char *name, File new_file);
+static int replace_data_file(HA_CHECK *param, MI_INFO *info, File new_file);
void myisamchk_init(HA_CHECK *param)
{
@@ -1715,7 +1714,7 @@ err:
/* Replace the actual file with the temporary file */
if (new_file >= 0)
{
- got_error= replace_data_file(param, info, name, new_file);
+ got_error= replace_data_file(param, info, new_file);
new_file= -1;
param->retry_repair= 0;
}
@@ -2528,7 +2527,7 @@ err:
/* Replace the actual file with the temporary file */
if (new_file >= 0)
{
- got_error= replace_data_file(param, info, name, new_file);
+ got_error= replace_data_file(param, info, new_file);
new_file= -1;
}
}
@@ -2542,7 +2541,7 @@ err:
(void) mysql_file_delete(mi_key_file_datatmp,
param->temp_filename, MYF(MY_WME));
if (info->dfile == new_file) /* Retry with key cache */
- if (unlikely(mi_open_datafile(info, share, name, -1)))
+ if (unlikely(mi_open_datafile(info, share)))
param->retry_repair= 0; /* Safety */
}
mi_mark_crashed_on_repair(info);
@@ -3063,7 +3062,7 @@ err:
/* Replace the actual file with the temporary file */
if (new_file >= 0)
{
- got_error= replace_data_file(param, info, name, new_file);
+ got_error= replace_data_file(param, info, new_file);
new_file= -1;
}
}
@@ -3077,7 +3076,7 @@ err:
(void) mysql_file_delete(mi_key_file_datatmp,
param->temp_filename, MYF(MY_WME));
if (info->dfile == new_file) /* Retry with key cache */
- if (unlikely(mi_open_datafile(info, share, name, -1)))
+ if (unlikely(mi_open_datafile(info, share)))
param->retry_repair= 0; /* Safety */
}
mi_mark_crashed_on_repair(info);
@@ -4760,8 +4759,7 @@ int mi_make_backup_of_index(MI_INFO *info, time_t backup_time, myf flags)
}
-static int replace_data_file(HA_CHECK *param, MI_INFO *info,
- const char *name, File new_file)
+static int replace_data_file(HA_CHECK *param, MI_INFO *info, File new_file)
{
MYISAM_SHARE *share=info->s;
@@ -4796,7 +4794,7 @@ static int replace_data_file(HA_CHECK *param, MI_INFO *info,
DATA_TMP_EXT, param->backup_time,
(param->testflag & T_BACKUP_DATA ?
MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
- mi_open_datafile(info, share, name, -1))
+ mi_open_datafile(info, share))
return 1;
return 0;
}
diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c
index 88b9da6f8a9..aa85a3718de 100644
--- a/storage/myisam/mi_create.c
+++ b/storage/myisam/mi_create.c
@@ -46,7 +46,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
uint aligned_key_start, block_length, res;
uint internal_table= flags & HA_CREATE_INTERNAL_TABLE;
ulong reclength, real_reclength,min_pack_length;
- char filename[FN_REFLEN],linkname[FN_REFLEN], *linkname_ptr;
+ char kfilename[FN_REFLEN],klinkname[FN_REFLEN], *klinkname_ptr;
+ char dfilename[FN_REFLEN],dlinkname[FN_REFLEN], *dlinkname_ptr;
ulong pack_reclength;
ulonglong tot_length,max_rows, tmp;
enum en_fieldtype type;
@@ -594,19 +595,19 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
/* chop off the table name, temporary tables use generated name */
if ((path= strrchr(ci->index_file_name, FN_LIBCHAR)))
*path= '\0';
- fn_format(filename, name, ci->index_file_name, MI_NAME_IEXT,
+ fn_format(kfilename, name, ci->index_file_name, MI_NAME_IEXT,
MY_REPLACE_DIR | MY_UNPACK_FILENAME |
MY_RETURN_REAL_PATH | MY_APPEND_EXT);
}
else
{
- fn_format(filename, ci->index_file_name, "", MI_NAME_IEXT,
+ fn_format(kfilename, ci->index_file_name, "", MI_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
}
- fn_format(linkname, name, "", MI_NAME_IEXT,
+ fn_format(klinkname, name, "", MI_NAME_IEXT,
MY_UNPACK_FILENAME|MY_APPEND_EXT);
- linkname_ptr=linkname;
+ klinkname_ptr= klinkname;
/*
Don't create the table if the link or file exists to ensure that one
doesn't accidentally destroy another table.
@@ -617,10 +618,10 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
{
char *iext= strrchr(name, '.');
int have_iext= iext && !strcmp(iext, MI_NAME_IEXT);
- fn_format(filename, name, "", MI_NAME_IEXT,
+ fn_format(kfilename, name, "", MI_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
- linkname_ptr=0;
+ klinkname_ptr= 0;
/* Replace the current file */
create_flag=(flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD;
}
@@ -635,7 +636,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
NOTE: The filename is compared against unique_file_name of every
open table. Hence we need a real path here.
*/
- if (!internal_table && test_if_reopen(filename))
+ if (!internal_table && test_if_reopen(kfilename))
{
my_printf_error(HA_ERR_TABLE_EXIST, "MyISAM table '%s' is in use "
"(most likely by a MERGE table). Try FLUSH TABLES.",
@@ -645,7 +646,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
}
if ((file= mysql_file_create_with_symlink(mi_key_file_kfile,
- linkname_ptr, filename, 0,
+ klinkname_ptr, kfilename, 0,
create_mode,
MYF(MY_WME | create_flag))) < 0)
goto err;
@@ -665,31 +666,31 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
/* chop off the table name, temporary tables use generated name */
if ((path= strrchr(ci->data_file_name, FN_LIBCHAR)))
*path= '\0';
- fn_format(filename, name, ci->data_file_name, MI_NAME_DEXT,
+ fn_format(dfilename, name, ci->data_file_name, MI_NAME_DEXT,
MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_APPEND_EXT);
}
else
{
- fn_format(filename, ci->data_file_name, "", MI_NAME_DEXT,
+ fn_format(dfilename, ci->data_file_name, "", MI_NAME_DEXT,
MY_UNPACK_FILENAME |
(have_dext ? MY_REPLACE_EXT : MY_APPEND_EXT));
}
- fn_format(linkname, name, "",MI_NAME_DEXT,
+ fn_format(dlinkname, name, "",MI_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
- linkname_ptr=linkname;
+ dlinkname_ptr= dlinkname;
create_flag=0;
}
else
{
- fn_format(filename,name,"", MI_NAME_DEXT,
+ fn_format(dfilename,name,"", MI_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
- linkname_ptr=0;
+ dlinkname_ptr= 0;
create_flag=(flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD;
}
if ((dfile=
mysql_file_create_with_symlink(mi_key_file_dfile,
- linkname_ptr, filename, 0,
+ dlinkname_ptr, dfilename, 0,
create_mode,
MYF(MY_WME | create_flag))) < 0)
goto err;
@@ -843,19 +844,21 @@ err_no_lock:
(void) mysql_file_close(dfile, MYF(0));
/* fall through */
case 2:
- if (! (flags & HA_DONT_TOUCH_DATA))
- mysql_file_delete_with_symlink(mi_key_file_dfile,
- fn_format(filename, name, "", MI_NAME_DEXT,
- MY_UNPACK_FILENAME | MY_APPEND_EXT),
- MYF(0));
+ if (! (flags & HA_DONT_TOUCH_DATA))
+ {
+ mysql_file_delete(mi_key_file_dfile, dfilename, MYF(0));
+ if (dlinkname_ptr)
+ mysql_file_delete(mi_key_file_dfile, dlinkname_ptr, MYF(0));
+ }
/* fall through */
case 1:
(void) mysql_file_close(file, MYF(0));
if (! (flags & HA_DONT_TOUCH_DATA))
- mysql_file_delete_with_symlink(mi_key_file_kfile,
- fn_format(filename, name, "", MI_NAME_IEXT,
- MY_UNPACK_FILENAME | MY_APPEND_EXT),
- MYF(0));
+ {
+ mysql_file_delete(mi_key_file_kfile, kfilename, MYF(0));
+ if (klinkname_ptr)
+ mysql_file_delete(mi_key_file_kfile, klinkname_ptr, MYF(0));
+ }
}
my_free(rec_per_key_part);
DBUG_RETURN(my_errno=save_errno); /* return the fatal errno */
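Note on the mi_create.c hunks above: the single filename/linkname buffer pair is split into kfilename/klinkname for the index file and dfilename/dlinkname for the data file, so the error-unwind cases 1 and 2 can delete exactly the files (and symlinks) that were created, instead of re-deriving the names with fn_format(). A generic sketch of that cleanup shape, using the mysys calls that already appear in the hunk (the helper name itself is illustrative):

    /* Illustrative helper only: remove a file created earlier in mi_create(),
       plus its symlink if one was created (linkname may be NULL). */
    static void cleanup_created_file(PSI_file_key key,
                                     const char *filename,
                                     const char *linkname)
    {
      mysql_file_delete(key, filename, MYF(0));
      if (linkname)
        mysql_file_delete(key, linkname, MYF(0));
    }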
diff --git a/storage/myisam/mi_delete_table.c b/storage/myisam/mi_delete_table.c
index 7da960011ca..a72a94b06d2 100644
--- a/storage/myisam/mi_delete_table.c
+++ b/storage/myisam/mi_delete_table.c
@@ -26,30 +26,6 @@
#define mi_key_file_dfile 0
#endif
-static int delete_one_file(const char *name, const char *ext,
- PSI_file_key pskey __attribute__((unused)),
- myf flags)
-{
- char from[FN_REFLEN];
- DBUG_ENTER("delete_one_file");
- fn_format(from,name, "", ext, MY_UNPACK_FILENAME | MY_APPEND_EXT);
- if (my_is_symlink(from) && (*myisam_test_invalid_symlink)(from))
- {
- /*
- Symlink is pointing to file in data directory.
- Remove symlink, keep file.
- */
- if (mysql_file_delete(pskey, from, flags))
- DBUG_RETURN(my_errno);
- }
- else
- {
- if (mysql_file_delete_with_symlink(pskey, from, flags))
- DBUG_RETURN(my_errno);
- }
- DBUG_RETURN(0);
-}
-
int mi_delete_table(const char *name)
{
int res;
@@ -59,14 +35,14 @@ int mi_delete_table(const char *name)
check_table_is_closed(name,"delete");
#endif
- if ((res= delete_one_file(name, MI_NAME_IEXT, mi_key_file_kfile, MYF(MY_WME))))
- DBUG_RETURN(res);
- if ((res= delete_one_file(name, MI_NAME_DEXT, mi_key_file_dfile, MYF(MY_WME))))
- DBUG_RETURN(res);
+ if (my_handler_delete_with_symlink(mi_key_file_kfile, name, MI_NAME_IEXT, MYF(MY_WME)) ||
+ my_handler_delete_with_symlink(mi_key_file_dfile, name, MI_NAME_DEXT, MYF(MY_WME)))
+ DBUG_RETURN(my_errno);
+
// optionally present:
- delete_one_file(name, ".OLD", mi_key_file_dfile, MYF(0));
- delete_one_file(name, ".TMD", mi_key_file_dfile, MYF(0));
+ my_handler_delete_with_symlink(mi_key_file_dfile, name, ".OLD", MYF(0));
+ my_handler_delete_with_symlink(mi_key_file_dfile, name, ".TMD", MYF(0));
DBUG_RETURN(0);
}
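The mi_delete_table() hunk above drops the local delete_one_file() helper and calls my_handler_delete_with_symlink() with the name and extension passed separately. Assuming the shared helper keeps the behaviour the removed code documented (unlink a symlink that the symlink policy rejects but leave its target alone, otherwise delete file and link together), a schematic version looks like this:

    /* Schematic only; assumes the shared helper mirrors the removed
       delete_one_file(): a rejected symlink is unlinked, its target kept. */
    static int delete_table_file(PSI_file_key key, const char *name,
                                 const char *ext, myf flags)
    {
      char from[FN_REFLEN];
      fn_format(from, name, "", ext, MY_UNPACK_FILENAME | MY_APPEND_EXT);
      if (my_is_symlink(from) && mysys_test_invalid_symlink(from))
        return mysql_file_delete(key, from, flags);            /* link only */
      return mysql_file_delete_with_symlink(key, from, flags); /* file + link */
    }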
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index 060017f10ad..bdb2fdf8447 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -104,8 +104,13 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
realpath_err= my_realpath(name_buff,
fn_format(org_name,name,"",MI_NAME_IEXT,4),MYF(0));
+ if (realpath_err > 0) /* File not found, no point in looking further. */
+ {
+ DBUG_RETURN(NULL);
+ }
+
if (my_is_symlink(org_name) &&
- (realpath_err || (*myisam_test_invalid_symlink)(name_buff)))
+ (realpath_err || mysys_test_invalid_symlink(name_buff)))
{
my_errno= HA_WRONG_CREATE_OPTION;
DBUG_RETURN (NULL);
@@ -131,15 +136,17 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
my_errno= HA_ERR_CRASHED;
goto err;
});
- if ((kfile= mysql_file_open(mi_key_file_kfile,
- name_buff,
- (open_mode= O_RDWR) | O_SHARE, MYF(0))) < 0)
+
+ DEBUG_SYNC_C("mi_open_kfile");
+ if ((kfile= mysql_file_open(mi_key_file_kfile, name_buff,
+ (open_mode= O_RDWR) | O_SHARE | O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS))) < 0)
{
if ((errno != EROFS && errno != EACCES) ||
mode != O_RDONLY ||
- (kfile= mysql_file_open(mi_key_file_kfile,
- name_buff,
- (open_mode= O_RDONLY) | O_SHARE, MYF(0))) < 0)
+ (kfile= mysql_file_open(mi_key_file_kfile, name_buff,
+ (open_mode= O_RDONLY) | O_SHARE| O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS))) < 0)
goto err;
}
share->mode=open_mode;
@@ -183,7 +190,18 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
(void) strmov(index_name, org_name);
*strrchr(org_name, '.')= '\0';
(void) fn_format(data_name,org_name,"",MI_NAME_DEXT,
- MY_APPEND_EXT|MY_UNPACK_FILENAME|MY_RESOLVE_SYMLINKS);
+ MY_APPEND_EXT|MY_UNPACK_FILENAME);
+ if (my_is_symlink(data_name))
+ {
+ if (my_realpath(data_name, data_name, MYF(0)))
+ goto err;
+ if (mysys_test_invalid_symlink(data_name))
+ {
+ my_errno= HA_WRONG_CREATE_OPTION;
+ goto err;
+ }
+ share->mode|= O_NOFOLLOW; /* all symlinks are resolved by realpath() */
+ }
info_length=mi_uint2korr(share->state.header.header_length);
base_pos=mi_uint2korr(share->state.header.base_pos);
@@ -497,7 +515,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
lock_error=1; /* Database unlocked */
}
- if (mi_open_datafile(&info, share, name, -1))
+ if (mi_open_datafile(&info, share))
goto err;
errpos=5;
@@ -578,7 +596,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
my_errno=EACCES; /* Can't open in write mode */
goto err;
}
- if (mi_open_datafile(&info, share, name, old_info->dfile))
+ if (mi_open_datafile(&info, share))
goto err;
errpos=5;
have_rtree= old_info->rtree_recursion_state != NULL;
@@ -1250,33 +1268,14 @@ uchar *mi_recinfo_read(uchar *ptr, MI_COLUMNDEF *recinfo)
Open data file.
We can't use dup() here as the data file descriptors need to have different
active seek-positions.
-
-The argument file_to_dup is here for the future if there would on some OS
-exist a dup()-like call that would give us two different file descriptors.
*************************************************************************/
-int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, const char *org_name,
- File file_to_dup __attribute__((unused)))
+int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share)
{
- char *data_name= share->data_file_name;
- char real_data_name[FN_REFLEN];
-
- if (org_name)
- {
- fn_format(real_data_name,org_name,"",MI_NAME_DEXT,4);
- if (my_is_symlink(real_data_name))
- {
- if (my_realpath(real_data_name, real_data_name, MYF(0)) ||
- (*myisam_test_invalid_symlink)(real_data_name))
- {
- my_errno= HA_WRONG_CREATE_OPTION;
- return 1;
- }
- data_name= real_data_name;
- }
- }
- info->dfile= mysql_file_open(mi_key_file_dfile,
- data_name, share->mode | O_SHARE, MYF(MY_WME));
+ myf flags= MY_WME | (share->mode & O_NOFOLLOW ? MY_NOSYMLINKS: 0);
+ DEBUG_SYNC_C("mi_open_datafile");
+ info->dfile= mysql_file_open(mi_key_file_dfile, share->data_file_name,
+ share->mode | O_SHARE, MYF(flags));
return info->dfile >= 0 ? 0 : 1;
}
@@ -1285,8 +1284,8 @@ int mi_open_keyfile(MYISAM_SHARE *share)
{
if ((share->kfile= mysql_file_open(mi_key_file_kfile,
share->unique_file_name,
- share->mode | O_SHARE,
- MYF(MY_WME))) < 0)
+ share->mode | O_SHARE | O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS | MY_WME))) < 0)
return 1;
return 0;
}
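The mi_open.c and mi_open_keyfile() hunks above are the symlink-hardening part of the change: the per-engine myisam_test_invalid_symlink hook is replaced by mysys_test_invalid_symlink, index and key files are opened with O_NOFOLLOW plus a MY_NOSYMLINKS flag, and a data file that is a symlink is resolved with my_realpath() and validated before use. A standalone POSIX sketch of the underlying open pattern (the mysys flag itself is not reproduced here):

    /* Standalone illustration: refuse to follow a symlink when opening,
       which is what O_NOFOLLOW gives the hardened opens above. */
    #include <fcntl.h>
    #include <errno.h>
    #include <stdio.h>

    static int open_no_symlink(const char *path, int open_mode)
    {
      int fd= open(path, open_mode | O_NOFOLLOW);
      if (fd < 0 && errno == ELOOP)
        fprintf(stderr, "%s is a symlink, refusing to open it\n", path);
      return fd;                       /* caller checks for < 0 */
    }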
diff --git a/storage/myisam/mi_static.c b/storage/myisam/mi_static.c
index d77f4f6b8e2..49019fb861c 100644
--- a/storage/myisam/mi_static.c
+++ b/storage/myisam/mi_static.c
@@ -42,14 +42,6 @@ ulong myisam_data_pointer_size=4;
ulonglong myisam_mmap_size= SIZE_T_MAX, myisam_mmap_used= 0;
my_bool (*mi_killed)(MI_INFO *)= mi_killed_standalone;
-static int always_valid(const char *filename __attribute__((unused)))
-{
- return 0;
-}
-
-int (*myisam_test_invalid_symlink)(const char *filename)= always_valid;
-
-
/*
read_vec[] is used for converting between P_READ_KEY.. and SEARCH_
Position is , == , >= , <= , > , <
diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index 7835ab83531..edbe235e190 100644
--- a/storage/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
@@ -1047,7 +1047,7 @@ static int myisamchk(HA_CHECK *param, char * filename)
MYF(MY_WME)); /* Close new file */
error|=change_to_newfile(filename, MI_NAME_DEXT, DATA_TMP_EXT,
0, MYF(0));
- if (mi_open_datafile(info,info->s, NULL, -1))
+ if (mi_open_datafile(info, info->s))
error=1;
param->out_flag&= ~O_NEW_DATA; /* We are using new datafile */
param->read_cache.file=info->dfile;
diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index 07c4a5b473a..92fc5e45d5d 100644
--- a/storage/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
@@ -713,8 +713,7 @@ void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
my_bool all_keys);
extern MI_INFO *test_if_reopen(char *filename);
my_bool check_table_is_closed(const char *name, const char *where);
-int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, const char *orn_name,
- File file_to_dup);
+int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share);
int mi_open_keyfile(MYISAM_SHARE *share);
void mi_setup_functions(register MYISAM_SHARE *share);
diff --git a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result
index 42c86405503..6b1d0a1854d 100644
--- a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result
+++ b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result
@@ -33,5 +33,5 @@ FROM `version_history` AS `v` INNER JOIN `db_history` AS `db` ON `db`.`nodeID` =
WHERE `latch` = 'breadth_first' AND `origid` = '1' ORDER BY `weight` DESC LIMIT 1;
version nodeID
0.0.3 3
-DROP TABLE db_history;
DROP TABLE version_history;
+DROP TABLE db_history;
diff --git a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.test b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.test
index 3a7a0e5af08..b8f0ae556c8 100644
--- a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.test
+++ b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.test
@@ -40,8 +40,8 @@
--disconnect con2
--connect (con3,localhost,root,,test)
-DROP TABLE db_history;
DROP TABLE version_history;
+DROP TABLE db_history;
--disconnect con3
diff --git a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.result b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.result
index 8e680e15206..68002ce98a2 100644
--- a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.result
+++ b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.result
@@ -8,5 +8,5 @@ latch origid destid weight seq linkid
breadth_first 1 6 NULL 0 1
breadth_first 1 6 1 1 2
breadth_first 1 6 1 2 6
-DROP TABLE IF EXISTS oq_backing;
DROP TABLE IF EXISTS oq_graph;
+DROP TABLE IF EXISTS oq_backing;
diff --git a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.test b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.test
index 72de926da2e..fefd03a7ed9 100644
--- a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.test
+++ b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6345.test
@@ -14,6 +14,6 @@ CREATE TABLE oq_graph (latch VARCHAR(32) NULL, origid BIGINT UNSIGNED NULL, dest
SELECT * FROM oq_graph WHERE latch='breadth_first' AND origid=1 AND destid=6;
-DROP TABLE IF EXISTS oq_backing;
DROP TABLE IF EXISTS oq_graph;
+DROP TABLE IF EXISTS oq_backing;
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index 3017d3cf68e..90a8f4974c4 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -1,4 +1,4 @@
-SET(TOKUDB_VERSION 5.6.34-79.1)
+SET(TOKUDB_VERSION 5.6.35-80.0)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc
index 30a8710d7aa..ad9ecb1d074 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.cc
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc
@@ -651,10 +651,8 @@ void toku_ftnode_clone_callback(void *value_data,
// set new pair attr if necessary
if (node->height == 0) {
*new_attr = make_ftnode_pair_attr(node);
- for (int i = 0; i < node->n_children; i++) {
- BLB(node, i)->logical_rows_delta = 0;
- BLB(cloned_node, i)->logical_rows_delta = 0;
- }
+ node->logical_rows_delta = 0;
+ cloned_node->logical_rows_delta = 0;
} else {
new_attr->is_valid = false;
}
@@ -702,6 +700,10 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
if (ftnode->height == 0) {
FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF, 1);
FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF_BYTES, node_size);
+ if (!ftnode->dirty) {
+ toku_ft_adjust_logical_row_count(
+ ft, -ftnode->logical_rows_delta);
+ }
} else {
FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF, 1);
FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF_BYTES, node_size);
@@ -714,11 +716,12 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
BASEMENTNODE bn = BLB(ftnode, i);
toku_ft_decrease_stats(&ft->in_memory_stats,
bn->stat64_delta);
- if (!ftnode->dirty)
- toku_ft_adjust_logical_row_count(
- ft, -bn->logical_rows_delta);
}
}
+ if (!ftnode->dirty) {
+ toku_ft_adjust_logical_row_count(
+ ft, -ftnode->logical_rows_delta);
+ }
}
}
toku_ftnode_free(&ftnode);
@@ -944,8 +947,6 @@ int toku_ftnode_pe_callback(void *ftnode_pv,
basements_to_destroy[num_basements_to_destroy++] = bn;
toku_ft_decrease_stats(&ft->in_memory_stats,
bn->stat64_delta);
- toku_ft_adjust_logical_row_count(ft,
- -bn->logical_rows_delta);
set_BNULL(node, i);
BP_STATE(node, i) = PT_ON_DISK;
num_partial_evictions++;
@@ -2652,7 +2653,7 @@ static std::unique_ptr<char[], decltype(&toku_free)> toku_file_get_parent_dir(
return result;
}
-static bool toku_create_subdirs_if_needed(const char *path) {
+bool toku_create_subdirs_if_needed(const char *path) {
static const mode_t dir_mode = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP |
S_IWGRP | S_IXGRP | S_IROTH | S_IXOTH;
@@ -4563,6 +4564,8 @@ int toku_ft_rename_iname(DB_TXN *txn,
bs_new_name);
}
+ if (!toku_create_subdirs_if_needed(new_iname_full.get()))
+ return get_error_errno();
r = toku_os_rename(old_iname_full.get(), new_iname_full.get());
if (r != 0)
return r;
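Taken together, the ft-ops.cc hunks above move logical_rows_delta from the basement nodes to the ftnode itself: the clone callback zeroes the per-node delta, the flush callback backs an un-persisted delta out of the tree-wide logical row count when a clean node is evicted, and the partial-eviction callback no longer adjusts the count per basement. A toy standalone model of that bookkeeping (names are illustrative):

    // Toy model: a node accumulates a row-count delta; if it is evicted
    // while clean (the delta was never written out), the tree-wide count
    // is rolled back by that amount, as in toku_ftnode_flush_callback().
    #include <cstdint>

    struct ToyTree { int64_t logical_row_count = 0; };

    struct ToyNode {
        bool    dirty = false;
        int64_t logical_rows_delta = 0;
    };

    static void on_evict(ToyTree &tree, const ToyNode &node) {
        if (!node.dirty)
            tree.logical_row_count -= node.logical_rows_delta;
    }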
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.h b/storage/tokudb/PerconaFT/ft/ft-ops.h
index 70cf045d43c..df8ffe287df 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.h
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.h
@@ -288,3 +288,8 @@ void toku_ft_set_direct_io(bool direct_io_on);
void toku_ft_set_compress_buffers_before_eviction(bool compress_buffers);
void toku_note_deserialized_basement_node(bool fixed_key_size);
+
+// Creates all directories for the path if necessary,
+// returns true if all dirs are created successfully or
+// all dirs exist, false otherwise.
+bool toku_create_subdirs_if_needed(const char* path);
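The comment added above documents toku_create_subdirs_if_needed(): before a dictionary file is created or renamed into place, every missing parent directory of the path must be created. A standalone sketch of that behaviour (not PerconaFT's actual implementation, which lives in ft-ops.cc and is merely made non-static by this change) could look like this:

    // Hypothetical stand-in: create every missing parent directory of `path`.
    // Returns true if all parents exist afterwards, false on a real failure.
    #include <string>
    #include <sys/stat.h>
    #include <errno.h>

    static bool create_subdirs_if_needed(const std::string &path) {
        const mode_t dir_mode = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP |
                                S_IWGRP | S_IXGRP | S_IROTH | S_IXOTH;
        for (size_t pos = path.find('/', 1); pos != std::string::npos;
             pos = path.find('/', pos + 1)) {
            if (mkdir(path.substr(0, pos).c_str(), dir_mode) != 0 &&
                errno != EEXIST)
                return false;              // permissions or similar failure
        }
        return true;                       // all parent directories exist
    }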
diff --git a/storage/tokudb/PerconaFT/ft/logger/recover.cc b/storage/tokudb/PerconaFT/ft/logger/recover.cc
index a9c30c0e37a..9eaa56bdc53 100644
--- a/storage/tokudb/PerconaFT/ft/logger/recover.cc
+++ b/storage/tokudb/PerconaFT/ft/logger/recover.cc
@@ -987,7 +987,8 @@ static int toku_recover_frename(struct logtype_frename *l, RECOVER_ENV renv) {
return 1;
if (old_exist && !new_exist &&
- (toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
+ (!toku_create_subdirs_if_needed(new_iname_full.get()) ||
+ toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
toku_fsync_directory(old_iname_full.get()) == -1 ||
toku_fsync_directory(new_iname_full.get()) == -1))
return 1;
diff --git a/storage/tokudb/PerconaFT/ft/node.cc b/storage/tokudb/PerconaFT/ft/node.cc
index 12e5fda226e..07309ff7f94 100644
--- a/storage/tokudb/PerconaFT/ft/node.cc
+++ b/storage/tokudb/PerconaFT/ft/node.cc
@@ -386,7 +386,8 @@ static void bnc_apply_messages_to_basement_node(
const pivot_bounds &
bounds, // contains pivot key bounds of this basement node
txn_gc_info *gc_info,
- bool *msgs_applied) {
+ bool *msgs_applied,
+ int64_t* logical_rows_delta) {
int r;
NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum);
@@ -394,7 +395,6 @@ static void bnc_apply_messages_to_basement_node(
// apply messages from this buffer
STAT64INFO_S stats_delta = {0, 0};
uint64_t workdone_this_ancestor = 0;
- int64_t logical_rows_delta = 0;
uint32_t stale_lbi, stale_ube;
if (!bn->stale_ancestor_messages_applied) {
@@ -470,7 +470,7 @@ static void bnc_apply_messages_to_basement_node(
gc_info,
&workdone_this_ancestor,
&stats_delta,
- &logical_rows_delta);
+ logical_rows_delta);
}
} else if (stale_lbi == stale_ube) {
// No stale messages to apply, we just apply fresh messages, and mark
@@ -482,7 +482,7 @@ static void bnc_apply_messages_to_basement_node(
.gc_info = gc_info,
.workdone = &workdone_this_ancestor,
.stats_to_update = &stats_delta,
- .logical_rows_delta = &logical_rows_delta};
+ .logical_rows_delta = logical_rows_delta};
if (fresh_ube - fresh_lbi > 0)
*msgs_applied = true;
r = bnc->fresh_message_tree
@@ -503,7 +503,7 @@ static void bnc_apply_messages_to_basement_node(
.gc_info = gc_info,
.workdone = &workdone_this_ancestor,
.stats_to_update = &stats_delta,
- .logical_rows_delta = &logical_rows_delta};
+ .logical_rows_delta = logical_rows_delta};
r = bnc->stale_message_tree
.iterate_on_range<struct iterate_do_bn_apply_msg_extra,
@@ -521,8 +521,6 @@ static void bnc_apply_messages_to_basement_node(
if (stats_delta.numbytes || stats_delta.numrows) {
toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta);
}
- toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
- bn->logical_rows_delta += logical_rows_delta;
}
static void
@@ -536,6 +534,7 @@ apply_ancestors_messages_to_bn(
bool* msgs_applied
)
{
+ int64_t logical_rows_delta = 0;
BASEMENTNODE curr_bn = BLB(node, childnum);
const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum);
for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) {
@@ -548,13 +547,16 @@ apply_ancestors_messages_to_bn(
curr_ancestors->childnum,
curr_bounds,
gc_info,
- msgs_applied
+ msgs_applied,
+ &logical_rows_delta
);
// We don't want to check this ancestor node again if the
// next time we query it, the msn hasn't changed.
curr_bn->max_msn_applied = curr_ancestors->node->max_msn_applied_to_node_on_disk;
}
}
+ toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
+ node->logical_rows_delta += logical_rows_delta;
// At this point, we know all the stale messages above this
// basement node have been applied, and any new messages will be
// fresh, so we don't need to look at stale messages for this
diff --git a/storage/tokudb/PerconaFT/ft/node.h b/storage/tokudb/PerconaFT/ft/node.h
index 52eefec0936..db189e36d59 100644
--- a/storage/tokudb/PerconaFT/ft/node.h
+++ b/storage/tokudb/PerconaFT/ft/node.h
@@ -157,36 +157,49 @@ private:
// TODO: class me up
struct ftnode {
- MSN max_msn_applied_to_node_on_disk; // max_msn_applied that will be written to disk
+ // max_msn_applied that will be written to disk
+ MSN max_msn_applied_to_node_on_disk;
unsigned int flags;
- BLOCKNUM blocknum; // Which block number is this node?
- int layout_version; // What version of the data structure?
- int layout_version_original; // different (<) from layout_version if upgraded from a previous version (useful for debugging)
- int layout_version_read_from_disk; // transient, not serialized to disk, (useful for debugging)
- uint32_t build_id; // build_id (svn rev number) of software that wrote this node to disk
- int height; /* height is always >= 0. 0 for leaf, >0 for nonleaf. */
- int dirty;
+ // Which block number is this node?
+ BLOCKNUM blocknum;
+ // What version of the data structure?
+ int layout_version;
+ // different (<) from layout_version if upgraded from a previous version
+ // (useful for debugging)
+ int layout_version_original;
+ // transient, not serialized to disk, (useful for debugging)
+ int layout_version_read_from_disk;
+ // build_id (svn rev number) of software that wrote this node to disk
+ uint32_t build_id;
+ // height is always >= 0. 0 for leaf, >0 for nonleaf.
+ int height;
+ int dirty;
uint32_t fullhash;
+ // current count of rows added or removed by message application to this
+ // node as a basement node; irrelevant for internal nodes; reset when the
+ // node is undirtied. Used to back out the tree-scoped logical row count
+ // (LRC) if the node is evicted but not persisted
+ int64_t logical_rows_delta;
- // for internal nodes, if n_children==fanout+1 then the tree needs to be rebalanced.
- // for leaf nodes, represents number of basement nodes
+ // for internal nodes, if n_children==fanout+1 then the tree needs to be
+ // rebalanced. for leaf nodes, represents number of basement nodes
int n_children;
ftnode_pivot_keys pivotkeys;
- // What's the oldest referenced xid that this node knows about? The real oldest
- // referenced xid might be younger, but this is our best estimate. We use it
- // as a heuristic to transition provisional mvcc entries from provisional to
- // committed (from implicity committed to really committed).
+ // What's the oldest referenced xid that this node knows about? The real
+ // oldest referenced xid might be younger, but this is our best estimate.
+ // We use it as a heuristic to transition provisional mvcc entries from
+ // provisional to committed (from implicitly committed to really committed).
//
- // A better heuristic would be the oldest live txnid, but we use this since it
- // still works well most of the time, and its readily available on the inject
- // code path.
+ // A better heuristic would be the oldest live txnid, but we use this since
+ // it still works well most of the time, and it's readily available on the
+ // inject code path.
TXNID oldest_referenced_xid_known;
// array of size n_children, consisting of ftnode partitions
- // each one is associated with a child
- // for internal nodes, the ith partition corresponds to the ith message buffer
- // for leaf nodes, the ith partition corresponds to the ith basement node
+ // each one is associated with a child; for internal nodes, the ith
+ // partition corresponds to the ith message buffer; for leaf nodes, the
+ // ith partition corresponds to the ith basement node
struct ftnode_partition *bp;
struct ctpair *ct_pair;
};
@@ -199,7 +212,6 @@ struct ftnode_leaf_basement_node {
MSN max_msn_applied; // max message sequence number applied
bool stale_ancestor_messages_applied;
STAT64INFO_S stat64_delta; // change in stat64 counters since basement was last written to disk
- int64_t logical_rows_delta;
};
typedef struct ftnode_leaf_basement_node *BASEMENTNODE;
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
index 5914f8a1050..56876b474d4 100644
--- a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
@@ -996,7 +996,6 @@ BASEMENTNODE toku_clone_bn(BASEMENTNODE orig_bn) {
bn->seqinsert = orig_bn->seqinsert;
bn->stale_ancestor_messages_applied = orig_bn->stale_ancestor_messages_applied;
bn->stat64_delta = orig_bn->stat64_delta;
- bn->logical_rows_delta = orig_bn->logical_rows_delta;
bn->data_buffer.clone(&orig_bn->data_buffer);
return bn;
}
@@ -1007,7 +1006,6 @@ BASEMENTNODE toku_create_empty_bn_no_buffer(void) {
bn->seqinsert = 0;
bn->stale_ancestor_messages_applied = false;
bn->stat64_delta = ZEROSTATS;
- bn->logical_rows_delta = 0;
bn->data_buffer.init_zero();
return bn;
}
@@ -1432,6 +1430,7 @@ static FTNODE alloc_ftnode_for_deserialize(uint32_t fullhash, BLOCKNUM blocknum)
node->fullhash = fullhash;
node->blocknum = blocknum;
node->dirty = 0;
+ node->logical_rows_delta = 0;
node->bp = nullptr;
node->oldest_referenced_xid_known = TXNID_NONE;
return node;
diff --git a/storage/tokudb/PerconaFT/ft/txn/roll.cc b/storage/tokudb/PerconaFT/ft/txn/roll.cc
index 9f3977743a0..4f374d62173 100644
--- a/storage/tokudb/PerconaFT/ft/txn/roll.cc
+++ b/storage/tokudb/PerconaFT/ft/txn/roll.cc
@@ -227,7 +227,8 @@ int toku_rollback_frename(BYTESTRING old_iname,
return 1;
if (!old_exist && new_exist &&
- (toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
+ (!toku_create_subdirs_if_needed(old_iname_full.get()) ||
+ toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
toku_fsync_directory(new_iname_full.get()) == -1 ||
toku_fsync_directory(old_iname_full.get()) == -1))
return 1;
diff --git a/storage/tokudb/PerconaFT/util/dmt.h b/storage/tokudb/PerconaFT/util/dmt.h
index 71cde8814ab..99be296d0e9 100644
--- a/storage/tokudb/PerconaFT/util/dmt.h
+++ b/storage/tokudb/PerconaFT/util/dmt.h
@@ -589,7 +589,6 @@ private:
void convert_from_tree_to_array(void);
- __attribute__((nonnull(2,5)))
void delete_internal(subtree *const subtreep, const uint32_t idx, subtree *const subtree_replace, subtree **const rebalance_subtree);
template<typename iterate_extra_t,
@@ -627,16 +626,12 @@ private:
__attribute__((nonnull))
void rebalance(subtree *const subtree);
- __attribute__((nonnull(3)))
static void copyout(uint32_t *const outlen, dmtdata_t *const out, const dmt_node *const n);
- __attribute__((nonnull(3)))
static void copyout(uint32_t *const outlen, dmtdata_t **const out, dmt_node *const n);
- __attribute__((nonnull(4)))
static void copyout(uint32_t *const outlen, dmtdata_t *const out, const uint32_t len, const dmtdata_t *const stored_value_ptr);
- __attribute__((nonnull(4)))
static void copyout(uint32_t *const outlen, dmtdata_t **const out, const uint32_t len, dmtdata_t *const stored_value_ptr);
template<typename dmtcmp_t,
diff --git a/storage/tokudb/PerconaFT/util/omt.h b/storage/tokudb/PerconaFT/util/omt.h
index 799ed0eae7c..c7ed2ca546f 100644
--- a/storage/tokudb/PerconaFT/util/omt.h
+++ b/storage/tokudb/PerconaFT/util/omt.h
@@ -284,7 +284,6 @@ public:
* By taking ownership of the array, we save a malloc and memcpy,
* and possibly a free (if the caller is done with the array).
*/
- __attribute__((nonnull))
void create_steal_sorted_array(omtdata_t **const values, const uint32_t numvalues, const uint32_t new_capacity);
/**
@@ -667,7 +666,6 @@ private:
void set_at_internal(const subtree &subtree, const omtdata_t &value, const uint32_t idx);
- __attribute__((nonnull(2,5)))
void delete_internal(subtree *const subtreep, const uint32_t idx, omt_node *const copyn, subtree **const rebalance_subtree);
template<typename iterate_extra_t,
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index 39a2af3f8eb..ab6c7b9f9bc 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -29,6 +29,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "tokudb_status.h"
#include "tokudb_card.h"
#include "ha_tokudb.h"
+#include "sql_db.h"
#if TOKU_INCLUDE_EXTENDED_KEYS
@@ -6168,8 +6169,6 @@ int ha_tokudb::info(uint flag) {
stats.deleted = 0;
if (!(flag & HA_STATUS_NO_LOCK)) {
uint64_t num_rows = 0;
- TOKU_DB_FRAGMENTATION_S frag_info;
- memset(&frag_info, 0, sizeof frag_info);
error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd());
if (error) {
@@ -6186,11 +6185,6 @@ int ha_tokudb::info(uint flag) {
} else {
goto cleanup;
}
- error = share->file->get_fragmentation(share->file, &frag_info);
- if (error) {
- goto cleanup;
- }
- stats.delete_length = frag_info.unused_bytes;
DB_BTREE_STAT64 dict_stats;
error = share->file->stat64(share->file, txn, &dict_stats);
@@ -6202,6 +6196,7 @@ int ha_tokudb::info(uint flag) {
stats.update_time = dict_stats.bt_modify_time_sec;
stats.check_time = dict_stats.bt_verify_time_sec;
stats.data_file_length = dict_stats.bt_dsize;
+ stats.delete_length = dict_stats.bt_fsize - dict_stats.bt_dsize;
if (hidden_primary_key) {
//
// in this case, we have a hidden primary key, do not
@@ -6237,30 +6232,21 @@ int ha_tokudb::info(uint flag) {
//
// this solution is much simpler than trying to maintain an
// accurate number of valid keys at the handlerton layer.
- uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
+ uint curr_num_DBs =
+ table->s->keys + tokudb_test(hidden_primary_key);
for (uint i = 0; i < curr_num_DBs; i++) {
// skip the primary key, skip dropped indexes
if (i == primary_key || share->key_file[i] == NULL) {
continue;
}
- error =
- share->key_file[i]->stat64(
- share->key_file[i],
- txn,
- &dict_stats);
+ error = share->key_file[i]->stat64(
+ share->key_file[i], txn, &dict_stats);
if (error) {
goto cleanup;
}
stats.index_file_length += dict_stats.bt_dsize;
-
- error =
- share->file->get_fragmentation(
- share->file,
- &frag_info);
- if (error) {
- goto cleanup;
- }
- stats.delete_length += frag_info.unused_bytes;
+ stats.delete_length +=
+ dict_stats.bt_fsize - dict_stats.bt_dsize;
}
}
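In the ha_tokudb::info() hunks above the get_fragmentation() calls are gone; stats.delete_length is now derived from the DB_BTREE_STAT64 data that info() already fetches, as file size minus live data size, summed over the primary dictionary and every secondary index. In outline (bt_fsize and bt_dsize are the stat64 fields used in the hunk, the other variable names are illustrative):

    // Outline of the new free-space estimate: the difference between the
    // on-disk file size and the live data size approximates reclaimable space.
    uint64_t delete_length = main_stats.bt_fsize - main_stats.bt_dsize;
    for (const DB_BTREE_STAT64 &key_stats : secondary_index_stats)
        delete_length += key_stats.bt_fsize - key_stats.bt_dsize;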
@@ -7697,6 +7683,27 @@ int ha_tokudb::delete_table(const char *name) {
TOKUDB_HANDLER_DBUG_RETURN(error);
}
+static bool tokudb_check_db_dir_exist_from_table_name(const char *table_name) {
+ DBUG_ASSERT(table_name);
+ bool mysql_dir_exists;
+ char db_name[FN_REFLEN];
+ const char *db_name_begin = strchr(table_name, FN_LIBCHAR);
+ const char *db_name_end = strrchr(table_name, FN_LIBCHAR);
+ DBUG_ASSERT(db_name_begin);
+ DBUG_ASSERT(db_name_end);
+ DBUG_ASSERT(db_name_begin != db_name_end);
+
+ ++db_name_begin;
+ size_t db_name_size = db_name_end - db_name_begin;
+
+ DBUG_ASSERT(db_name_size < FN_REFLEN);
+
+ memcpy(db_name, db_name_begin, db_name_size);
+ db_name[db_name_size] = '\0';
+ mysql_dir_exists = (check_db_dir_existence(db_name) == 0);
+
+ return mysql_dir_exists;
+}
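The new tokudb_check_db_dir_exist_from_table_name() above takes a partly-qualified table path such as "./new_db/t1", extracts the schema name between the first and last path separator, and asks the server whether that schema directory exists before attempting the rename. A standalone sketch of just the string handling, with a stub in place of check_db_dir_existence():

    // Standalone sketch of the db-name extraction; db_dir_exists() is a
    // hypothetical stand-in for the server's check_db_dir_existence().
    #include <cassert>
    #include <cstring>
    #include <string>

    static bool db_dir_exists(const std::string &) { return true; }  // stub

    static bool db_dir_exists_for_table(const char *table_name) {
        assert(table_name);
        const char *begin = strchr(table_name, '/');   // FN_LIBCHAR on unix
        const char *end   = strrchr(table_name, '/');
        assert(begin && end && begin != end);          // expects "./db/table"
        return db_dir_exists(std::string(begin + 1, end));
    }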
//
// renames table from "from" to "to"
@@ -7719,15 +7726,33 @@ int ha_tokudb::rename_table(const char *from, const char *to) {
TOKUDB_SHARE::drop_share(share);
}
int error;
- error = delete_or_rename_table(from, to, false);
- if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
- error == DB_LOCK_NOTGRANTED) {
+ bool to_db_dir_exist = tokudb_check_db_dir_exist_from_table_name(to);
+ if (!to_db_dir_exist) {
sql_print_error(
- "Could not rename table from %s to %s because another transaction "
- "has accessed the table. To rename the table, make sure no "
- "transactions touch the table.",
+ "Could not rename table from %s to %s because "
+ "destination db does not exist",
from,
to);
+#ifndef __WIN__
+ /* Small hack. tokudb_check_db_dir_exist_from_table_name calls
+ * my_access, which sets my_errno on Windows, but doesn't on
+ * unix. Set it for unix too.
+ */
+ my_errno= errno;
+#endif
+ error= my_errno;
+ }
+ else {
+ error = delete_or_rename_table(from, to, false);
+ if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
+ error == DB_LOCK_NOTGRANTED) {
+ sql_print_error(
+ "Could not rename table from %s to %s because another transaction "
+ "has accessed the table. To rename the table, make sure no "
+ "transactions touch the table.",
+ from,
+ to);
+ }
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h
index 713f99748f1..0629c41b3f3 100644
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@ -816,6 +816,8 @@ public:
int index_first(uchar * buf);
int index_last(uchar * buf);
+ bool has_gap_locks() const { return true; }
+
int rnd_init(bool scan);
int rnd_end();
int rnd_next(uchar * buf);
diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result b/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result
new file mode 100644
index 00000000000..74148bd4e74
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result
@@ -0,0 +1,46 @@
+SET GLOBAL tokudb_dir_per_db=true;
+######
+# Tokudb and mysql data dirs are the same, rename to existent db
+###
+CREATE DATABASE new_db;
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+ALTER TABLE test.t1 RENAME new_db.t1;
+The content of "test" directory:
+The content of "new_db" directory:
+db.opt
+t1.frm
+t1_main_id.tokudb
+t1_status_id.tokudb
+DROP DATABASE new_db;
+######
+# Tokudb and mysql data dirs are the same, rename to nonexistent db
+###
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+CALL mtr.add_suppression("because destination db does not exist");
+ALTER TABLE test.t1 RENAME foo.t1;
+ERROR HY000: Error on rename of './test/t1' to './foo/t1' (errno: 2 "No such file or directory")
+DROP TABLE t1;
+SELECT @@tokudb_data_dir;
+@@tokudb_data_dir
+NULL
+SELECT @@tokudb_dir_per_db;
+@@tokudb_dir_per_db
+0
+######
+# Tokudb and mysql data dirs are different, rename to existent db
+###
+CREATE DATABASE new_db;
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+ALTER TABLE test.t1 RENAME new_db.t1;
+The content of "test" directory:
+The content of "new_db" directory:
+DROP DATABASE new_db;
+######
+# Tokudb and mysql data dirs are different, rename to nonexistent db
+###
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+CALL mtr.add_suppression("because destination db does not exist");
+ALTER TABLE test.t1 RENAME foo.t1;
+ERROR HY000: Error on rename of './test/t1' to './foo/t1' (errno: 2 "No such file or directory")
+DROP TABLE t1;
+SET GLOBAL tokudb_dir_per_db=default;
diff --git a/storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result b/storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result
new file mode 100644
index 00000000000..41e294f7d8d
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result
@@ -0,0 +1,469 @@
+CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT,
+PRIMARY KEY (id1, id2, id3),
+INDEX i (c1)) ENGINE=tokudb;
+CREATE TABLE gap2 like gap1;
+CREATE TABLE gap3 (id INT, value INT,
+PRIMARY KEY (id),
+UNIQUE KEY ui(value)) ENGINE=tokudb;
+CREATE TABLE gap4 (id INT, value INT,
+PRIMARY KEY (id)) ENGINE=tokudb
+PARTITION BY HASH(id) PARTITIONS 2;
+insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5);
+insert into gap4 values (1,1), (2,2),(3,3),(4,4),(5,5);
+set session autocommit=0;
+select * from gap1 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 for update;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 for update;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 for update;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 for update;
+id value
+1 1
+select * from gap4 where id=1 for update;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) for update;
+id value
+1 1
+2 2
+3 3
+select * from gap4 for update;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 for update;
+id value
+3 3
+4 4
+5 5
+set session autocommit=1;
+select * from gap1 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 for update;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 for update;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 for update;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 for update;
+id value
+1 1
+select * from gap4 where id=1 for update;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) for update;
+id value
+1 1
+2 2
+3 3
+select * from gap4 for update;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 for update;
+id value
+3 3
+4 4
+5 5
+set session autocommit=0;
+select * from gap1 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 lock in share mode;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 lock in share mode;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 lock in share mode;
+id value
+1 1
+select * from gap4 where id=1 lock in share mode;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) lock in share mode;
+id value
+1 1
+2 2
+3 3
+select * from gap4 lock in share mode;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 lock in share mode;
+id value
+3 3
+4 4
+5 5
+set session autocommit=1;
+select * from gap1 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 lock in share mode;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 lock in share mode;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 lock in share mode;
+id value
+1 1
+select * from gap4 where id=1 lock in share mode;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) lock in share mode;
+id value
+1 1
+2 2
+3 3
+select * from gap4 lock in share mode;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 lock in share mode;
+id value
+3 3
+4 4
+5 5
+set session autocommit=0;
+select * from gap1 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 ;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 ;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 ;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 ;
+id value
+1 1
+select * from gap4 where id=1 ;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) ;
+id value
+1 1
+2 2
+3 3
+select * from gap4 ;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 ;
+id value
+3 3
+4 4
+5 5
+set session autocommit=1;
+select * from gap1 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 ;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 ;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 ;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 ;
+id value
+1 1
+select * from gap4 where id=1 ;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) ;
+id value
+1 1
+2 2
+3 3
+select * from gap4 ;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 ;
+id value
+3 3
+4 4
+5 5
+set session autocommit=0;
+insert into gap1 (id1, id2, id3) values (-1,-1,-1);
+insert into gap1 (id1, id2, id3) values (-1,-1,-1)
+on duplicate key update value=100;
+update gap1 set value=100 where id1=1;
+update gap1 set value=100 where id1=1 and id2=1 and id3=1;
+delete from gap1 where id1=2;
+delete from gap1 where id1=-1 and id2=-1 and id3=-1;
+commit;
+set session autocommit=1;
+insert into gap1 (id1, id2, id3) values (-1,-1,-1);
+insert into gap1 (id1, id2, id3) values (-1,-1,-1)
+on duplicate key update value=100;
+update gap1 set value=100 where id1=1;
+update gap1 set value=100 where id1=1 and id2=1 and id3=1;
+delete from gap1 where id1=2;
+delete from gap1 where id1=-1 and id2=-1 and id3=-1;
+commit;
+drop table gap1, gap2, gap3, gap4;
diff --git a/storage/tokudb/mysql-test/tokudb/r/locks-select-update-3.result b/storage/tokudb/mysql-test/tokudb/r/locks-select-update-3.result
index 8b31bf5a280..c1f05cdafd8 100644
--- a/storage/tokudb/mysql-test/tokudb/r/locks-select-update-3.result
+++ b/storage/tokudb/mysql-test/tokudb/r/locks-select-update-3.result
@@ -8,6 +8,7 @@ select * from t where a=1 for update;
a b
1 0
update t set b=b+1 where a=1;
+set session tokudb_lock_timeout= 60000;
set session transaction isolation level read committed;
begin;
select * from t where a=1 for update;
diff --git a/storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result
new file mode 100644
index 00000000000..089d1d2b136
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result
@@ -0,0 +1,43 @@
+SET default_storage_engine=TokuDB;
+#
+# Test kill_idle_transaction_timeout feature with TokuDB
+#
+CREATE TABLE t1 (a INT);
+SET GLOBAL kill_idle_transaction= 1;
+BEGIN;
+INSERT INTO t1 VALUES (1),(2);
+COMMIT;
+SELECT * FROM t1;
+a
+1
+2
+BEGIN;
+INSERT INTO t1 VALUES (3);
+# Current connection idle transaction killed, reconnecting
+SELECT * FROM t1;
+a
+1
+2
+#
+# Test that row locks are released on idle transaction kill
+#
+SET GLOBAL kill_idle_transaction= 2;
+# Take row locks in connection conn1
+BEGIN;
+SELECT * FROM t1 FOR UPDATE;
+a
+1
+2
+# Take row locks in connection default
+UPDATE t1 SET a=4;
+SELECT * FROM t1;
+a
+4
+4
+# Show that connection conn1 has been killed
+SELECT * FROM t1;
+ERROR HY000: MySQL server has gone away
+# connection default
+# Cleanup
+DROP TABLE t1;
+SET GLOBAL kill_idle_transaction= saved_kill_idle_transaction;
diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test b/storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test
new file mode 100644
index 00000000000..17fe0188a6e
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test
@@ -0,0 +1,64 @@
+--source include/have_tokudb.inc
+
+SET GLOBAL tokudb_dir_per_db=true;
+--let DATADIR=`SELECT @@datadir`
+
+--echo ######
+--echo # Tokudb and mysql data dirs are the same, rename to existent db
+--echo ###
+CREATE DATABASE new_db;
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+ALTER TABLE test.t1 RENAME new_db.t1;
+--echo The content of "test" directory:
+--source include/table_files_replace_pattern.inc
+--sorted_result
+--list_files $DATADIR/test
+--echo The content of "new_db" directory:
+--source include/table_files_replace_pattern.inc
+--sorted_result
+--list_files $DATADIR/new_db
+DROP DATABASE new_db;
+
+--echo ######
+--echo # Tokudb and mysql data dirs are the same, rename to nonexistent db
+--echo ###
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+CALL mtr.add_suppression("because destination db does not exist");
+--error ER_ERROR_ON_RENAME
+ALTER TABLE test.t1 RENAME foo.t1;
+DROP TABLE t1;
+
+--let $custom_tokudb_data_dir=$MYSQL_TMP_DIR/custom_tokudb_data_dir
+--mkdir $custom_tokudb_data_dir
+--replace_result $custom_tokudb_data_dir CUSTOM_TOKUDB_DATA_DIR
+--source include/restart_mysqld.inc
+
+--replace_result $custom_tokudb_data_dir CUSTOM_TOKUDB_DATA_DIR
+SELECT @@tokudb_data_dir;
+SELECT @@tokudb_dir_per_db;
+
+--echo ######
+--echo # Tokudb and mysql data dirs are different, rename to existent db
+--echo ###
+CREATE DATABASE new_db;
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+ALTER TABLE test.t1 RENAME new_db.t1;
+--echo The content of "test" directory:
+--source include/table_files_replace_pattern.inc
+--sorted_result
+--echo The content of "new_db" directory:
+--source include/table_files_replace_pattern.inc
+--sorted_result
+DROP DATABASE new_db;
+
+--echo ######
+--echo # Tokudb and mysql data dirs are different, rename to nonexistent db
+--echo ###
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+CALL mtr.add_suppression("because destination db does not exist");
+--error ER_ERROR_ON_RENAME
+ALTER TABLE test.t1 RENAME foo.t1;
+DROP TABLE t1;
+
+SET GLOBAL tokudb_dir_per_db=default;
+
diff --git a/storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test b/storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test
new file mode 100644
index 00000000000..8c52cef9e27
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test
@@ -0,0 +1,5 @@
+--source include/have_tokudb.inc
+
+let $engine=tokudb;
+let $expect_gap_lock_errors=0;
+--source include/gap_lock_error_all.inc
diff --git a/storage/tokudb/mysql-test/tokudb/t/locks-select-update-3.test b/storage/tokudb/mysql-test/tokudb/t/locks-select-update-3.test
index a563f061add..5b54fa7313e 100644
--- a/storage/tokudb/mysql-test/tokudb/t/locks-select-update-3.test
+++ b/storage/tokudb/mysql-test/tokudb/t/locks-select-update-3.test
@@ -15,6 +15,7 @@ select * from t where a=1 for update;
# t2 update
update t set b=b+1 where a=1;
connect(conn1,localhost,root);
+set session tokudb_lock_timeout= 60000;
set session transaction isolation level read committed;
begin;
# t2 select for update, should hang until t1 commits
diff --git a/storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test b/storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test
new file mode 100644
index 00000000000..743fb9a55a7
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test
@@ -0,0 +1,5 @@
+--source include/have_tokudb.inc
+--skip MariaDB doesn't support kill_idle_trx variable for all SE
+
+SET default_storage_engine=TokuDB;
+--source include/percona_kill_idle_trx.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
index 0d80cf85a91..5d4cb245e27 100644
--- a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
@@ -1 +1 @@
-$TOKUDB_OPT $TOKUDB_LOAD_ADD $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
+$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
diff --git a/storage/xtradb/btr/btr0btr.cc b/storage/xtradb/btr/btr0btr.cc
index ecea98fccfe..6bf8823124c 100644
--- a/storage/xtradb/btr/btr0btr.cc
+++ b/storage/xtradb/btr/btr0btr.cc
@@ -3400,7 +3400,7 @@ Removes a page from the level list of pages.
/*************************************************************//**
Removes a page from the level list of pages. */
-static MY_ATTRIBUTE((nonnull))
+static
void
btr_level_list_remove_func(
/*=======================*/
@@ -3416,8 +3416,6 @@ btr_level_list_remove_func(
ulint prev_page_no;
ulint next_page_no;
- ut_ad(page != NULL);
- ut_ad(mtr != NULL);
ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX));
ut_ad(space == page_get_space_id(page));
/* Get the previous and next page numbers of page */
diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc
index 214d050d562..1705de7ce36 100644
--- a/storage/xtradb/btr/btr0cur.cc
+++ b/storage/xtradb/btr/btr0cur.cc
@@ -1782,7 +1782,7 @@ btr_cur_pessimistic_insert(
/*************************************************************//**
For an update, checks the locks and does the undo logging.
@return DB_SUCCESS, DB_WAIT_LOCK, or error number */
-UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,6,7)))
+UNIV_INLINE MY_ATTRIBUTE((warn_unused_result))
dberr_t
btr_cur_upd_lock_and_undo(
/*======================*/
@@ -2014,7 +2014,6 @@ btr_cur_update_alloc_zip_func(
const page_t* page = page_cur_get_page(cursor);
ut_ad(page_zip == page_cur_get_page_zip(cursor));
- ut_ad(page_zip);
ut_ad(!dict_index_is_ibuf(index));
ut_ad(rec_offs_validate(page_cur_get_rec(cursor), index, offsets));
@@ -3141,7 +3140,7 @@ btr_cur_del_mark_set_clust_rec(
ut_ad(page_is_leaf(page_align(rec)));
#ifdef UNIV_DEBUG
- if (btr_cur_print_record_ops && (thr != NULL)) {
+ if (btr_cur_print_record_ops) {
btr_cur_trx_report(thr_get_trx(thr)->id, index, "del mark ");
rec_print_new(stderr, rec, offsets);
}
@@ -3855,19 +3854,42 @@ inexact:
return(n_rows);
}
-/*******************************************************************//**
-Estimates the number of rows in a given index range.
-@return estimated number of rows */
-UNIV_INTERN
-ib_int64_t
-btr_estimate_n_rows_in_range(
-/*=========================*/
- dict_index_t* index, /*!< in: index */
- const dtuple_t* tuple1, /*!< in: range start, may also be empty tuple */
- ulint mode1, /*!< in: search mode for range start */
- const dtuple_t* tuple2, /*!< in: range end, may also be empty tuple */
- ulint mode2, /*!< in: search mode for range end */
- trx_t* trx) /*!< in: trx */
+/** If the tree gets changed too much between the two dives for the left
+and right boundary then btr_estimate_n_rows_in_range_low() will retry
+that many times before giving up and returning the value stored in
+rows_in_range_arbitrary_ret_val. */
+static const unsigned rows_in_range_max_retries = 4;
+
+/** We pretend that a range has that many records if the tree keeps changing
+for rows_in_range_max_retries retries while we try to estimate the records
+in a given range. */
+static const int64_t rows_in_range_arbitrary_ret_val = 10;
+
+/** Estimates the number of rows in a given index range.
+@param[in] index index
+@param[in] tuple1 range start, may also be empty tuple
+@param[in] mode1 search mode for range start
+@param[in] tuple2 range end, may also be empty tuple
+@param[in] mode2 search mode for range end
+@param[in] trx trx
+@param[in] nth_attempt if the tree gets modified too much while
+we are trying to analyze it, then we will retry (this function will call
+itself, incrementing this parameter)
+@return estimated number of rows; if after rows_in_range_max_retries
+retries the tree keeps changing, then we will just return
+rows_in_range_arbitrary_ret_val as a result (if
+nth_attempt >= rows_in_range_max_retries and the tree is modified between
+the two dives). */
+static
+int64_t
+btr_estimate_n_rows_in_range_low(
+ dict_index_t* index,
+ const dtuple_t* tuple1,
+ ulint mode1,
+ const dtuple_t* tuple2,
+ ulint mode2,
+ trx_t* trx,
+ unsigned nth_attempt)
{
btr_path_t path1[BTR_PATH_ARRAY_N_SLOTS];
btr_path_t path2[BTR_PATH_ARRAY_N_SLOTS];
@@ -3905,6 +3927,12 @@ btr_estimate_n_rows_in_range(
mtr_start_trx(&mtr, trx);
+#ifdef UNIV_DEBUG
+ if (!strcmp(index->name, "iC")) {
+ DEBUG_SYNC_C("btr_estimate_n_rows_in_range_between_dives");
+ }
+#endif
+
cursor.path_arr = path2;
if (dtuple_get_n_fields(tuple2) > 0) {
@@ -3971,6 +3999,33 @@ btr_estimate_n_rows_in_range(
if (!diverged && slot1->nth_rec != slot2->nth_rec) {
+ /* If both slots do not point to the same page or if
+ the paths have crossed and the same page on both
+ apparently contains a different number of records,
+ this means that the tree must have changed between
+ the dive for slot1 and the dive for slot2 at the
+ beginning of this function. */
+ if (slot1->page_no != slot2->page_no
+ || slot1->page_level != slot2->page_level
+ || (slot1->nth_rec >= slot2->nth_rec
+ && slot1->n_recs != slot2->n_recs)) {
+
+ /* If the tree keeps changing even after a
+ few attempts, then just return some arbitrary
+ number. */
+ if (nth_attempt >= rows_in_range_max_retries) {
+ return(rows_in_range_arbitrary_ret_val);
+ }
+
+ const int64_t ret =
+ btr_estimate_n_rows_in_range_low(
+ index, tuple1, mode1,
+ tuple2, mode2, trx,
+ nth_attempt + 1);
+
+ return(ret);
+ }
+
diverged = TRUE;
if (slot1->nth_rec < slot2->nth_rec) {
@@ -3989,7 +4044,7 @@ btr_estimate_n_rows_in_range(
in this case slot1->nth_rec will point
to the supr record and slot2->nth_rec
will point to 6 */
- n_rows = 0;
+ return(0);
}
} else if (diverged && !diverged_lot) {
@@ -4020,6 +4075,30 @@ btr_estimate_n_rows_in_range(
}
}
+/** Estimates the number of rows in a given index range.
+@param[in] index index
+@param[in] tuple1 range start, may also be empty tuple
+@param[in] mode1 search mode for range start
+@param[in] tuple2 range end, may also be empty tuple
+@param[in] mode2 search mode for range end
+@param[in] trx trx
+@return estimated number of rows */
+int64_t
+btr_estimate_n_rows_in_range(
+ dict_index_t* index,
+ const dtuple_t* tuple1,
+ ulint mode1,
+ const dtuple_t* tuple2,
+ ulint mode2,
+ trx_t* trx)
+{
+ const int64_t ret = btr_estimate_n_rows_in_range_low(
+ index, tuple1, mode1, tuple2, mode2, trx,
+ 1 /* first attempt */);
+
+ return(ret);
+}
+
/*******************************************************************//**
Record the number of non_null key values in a given index for
each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index).
@@ -4482,7 +4561,6 @@ btr_cur_disown_inherited_fields(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!rec_offs_comp(offsets) || !rec_get_node_ptr_flag(rec));
ut_ad(rec_offs_any_extern(offsets));
- ut_ad(mtr);
for (i = 0; i < rec_offs_n_fields(offsets); i++) {
if (rec_offs_nth_extern(offsets, i)
@@ -4545,9 +4623,6 @@ btr_push_update_extern_fields(
ulint n;
const upd_field_t* uf;
- ut_ad(tuple);
- ut_ad(update);
-
uf = update->fields;
n = upd_get_n_fields(update);
@@ -4731,7 +4806,6 @@ btr_store_big_rec_extern_fields(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_any_extern(offsets));
- ut_ad(btr_mtr);
ut_ad(mtr_memo_contains(btr_mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(btr_mtr, rec_block, MTR_MEMO_PAGE_X_FIX));
diff --git a/storage/xtradb/buf/buf0buddy.cc b/storage/xtradb/buf/buf0buddy.cc
index 8cb880c1169..2ee39c6c992 100644
--- a/storage/xtradb/buf/buf0buddy.cc
+++ b/storage/xtradb/buf/buf0buddy.cc
@@ -485,7 +485,6 @@ buf_buddy_alloc_low(
{
buf_block_t* block;
- ut_ad(lru);
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
ut_ad(!mutex_own(&buf_pool->zip_mutex));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc
index 21b10196a25..520997591e8 100644
--- a/storage/xtradb/buf/buf0buf.cc
+++ b/storage/xtradb/buf/buf0buf.cc
@@ -3691,15 +3691,6 @@ buf_page_init(
/* Set the state of the block */
buf_block_set_file_page(block, space, offset);
-#ifdef UNIV_DEBUG_VALGRIND
- if (!space) {
- /* Silence valid Valgrind warnings about uninitialized
- data being written to data files. There are some unused
- bytes on some pages that InnoDB does not initialize. */
- UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE);
- }
-#endif /* UNIV_DEBUG_VALGRIND */
-
buf_block_init_low(block);
block->lock_hash_val = lock_rec_hash(space, offset);
diff --git a/storage/xtradb/buf/buf0dump.cc b/storage/xtradb/buf/buf0dump.cc
index cae6099f03e..df62c945288 100644
--- a/storage/xtradb/buf/buf0dump.cc
+++ b/storage/xtradb/buf/buf0dump.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -52,8 +53,8 @@ enum status_severity {
/* Flags that tell the buffer pool dump/load thread which action should it
take after being waked up. */
-static ibool buf_dump_should_start = FALSE;
-static ibool buf_load_should_start = FALSE;
+static volatile bool buf_dump_should_start;
+static volatile bool buf_load_should_start;
static ibool buf_load_abort_flag = FALSE;
@@ -78,7 +79,7 @@ void
buf_dump_start()
/*============*/
{
- buf_dump_should_start = TRUE;
+ buf_dump_should_start = true;
os_event_set(srv_buf_dump_event);
}
@@ -92,7 +93,7 @@ void
buf_load_start()
/*============*/
{
- buf_load_should_start = TRUE;
+ buf_load_should_start = true;
os_event_set(srv_buf_dump_event);
}
@@ -695,15 +696,18 @@ DECLARE_THREAD(buf_dump_thread)(
os_event_wait(srv_buf_dump_event);
if (buf_dump_should_start) {
- buf_dump_should_start = FALSE;
+ buf_dump_should_start = false;
buf_dump(TRUE /* quit on shutdown */);
}
if (buf_load_should_start) {
- buf_load_should_start = FALSE;
+ buf_load_should_start = false;
buf_load();
}
+ if (buf_dump_should_start || buf_load_should_start) {
+ continue;
+ }
os_event_reset(srv_buf_dump_event);
}
diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc
index 601e79d8923..018b29b7d95 100644
--- a/storage/xtradb/buf/buf0flu.cc
+++ b/storage/xtradb/buf/buf0flu.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -720,9 +721,6 @@ buf_flush_write_complete(
buf_pool->n_flush[flush_type]--;
- /* fprintf(stderr, "n pending flush %lu\n",
- buf_pool->n_flush[flush_type]); */
-
if (buf_pool->n_flush[flush_type] == 0
&& buf_pool->init_flush[flush_type] == FALSE) {
diff --git a/storage/xtradb/buf/buf0lru.cc b/storage/xtradb/buf/buf0lru.cc
index c599653706d..1aa2e7a9eb7 100644
--- a/storage/xtradb/buf/buf0lru.cc
+++ b/storage/xtradb/buf/buf0lru.cc
@@ -1301,6 +1301,71 @@ buf_LRU_check_size_of_non_data_objects(
}
}
+/** Diagnose failure to get a free page and request InnoDB monitor output in
+the error log if more than two seconds have been spent already.
+@param[in] n_iterations how many buf_LRU_get_free_page iterations
+ already completed
+@param[in] started_ms timestamp in ms of when the attempt to get the
+ free page started
+@param[in] flush_failures how many times single-page flush, if allowed,
+ has failed
+@param[out] mon_value_was previous srv_print_innodb_monitor value
+@param[out] started_monitor whether InnoDB monitor print has been requested
+*/
+static
+void
+buf_LRU_handle_lack_of_free_blocks(ulint n_iterations, ulint started_ms,
+ ulint flush_failures,
+ ibool *mon_value_was,
+ ibool *started_monitor)
+{
+ static ulint last_printout_ms = 0;
+
+ /* Legacy algorithm started warning after at least 2 seconds, we
+ emulate this. */
+ const ulint current_ms = ut_time_ms();
+
+ if ((current_ms > started_ms + 2000)
+ && (current_ms > last_printout_ms + 2000)) {
+
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Warning: difficult to find free blocks in\n"
+ "InnoDB: the buffer pool (%lu search iterations)!\n"
+ "InnoDB: %lu failed attempts to flush a page!"
+ " Consider\n"
+ "InnoDB: increasing the buffer pool size.\n"
+ "InnoDB: It is also possible that"
+ " in your Unix version\n"
+ "InnoDB: fsync is very slow, or"
+ " completely frozen inside\n"
+ "InnoDB: the OS kernel. Then upgrading to"
+ " a newer version\n"
+ "InnoDB: of your operating system may help."
+ " Look at the\n"
+ "InnoDB: number of fsyncs in diagnostic info below.\n"
+ "InnoDB: Pending flushes (fsync) log: %lu;"
+ " buffer pool: %lu\n"
+ "InnoDB: %lu OS file reads, %lu OS file writes,"
+ " %lu OS fsyncs\n"
+ "InnoDB: Starting InnoDB Monitor to print further\n"
+ "InnoDB: diagnostics to the standard output.\n",
+ (ulong) n_iterations,
+ (ulong) flush_failures,
+ (ulong) fil_n_pending_log_flushes,
+ (ulong) fil_n_pending_tablespace_flushes,
+ (ulong) os_n_file_reads, (ulong) os_n_file_writes,
+ (ulong) os_n_fsyncs);
+
+ last_printout_ms = current_ms;
+ *mon_value_was = srv_print_innodb_monitor;
+ *started_monitor = TRUE;
+ srv_print_innodb_monitor = TRUE;
+ os_event_set(lock_sys->timeout_event);
+ }
+
+}
+
/** The maximum allowed backoff sleep time duration, microseconds */
#define MAX_FREE_LIST_BACKOFF_SLEEP 10000
@@ -1348,6 +1413,7 @@ buf_LRU_get_free_block(
ulint flush_failures = 0;
ibool mon_value_was = FALSE;
ibool started_monitor = FALSE;
+ ulint started_ms = 0;
ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
@@ -1356,7 +1422,24 @@ loop:
buf_LRU_check_size_of_non_data_objects(buf_pool);
/* If there is a block in the free list, take it */
- block = buf_LRU_get_free_only(buf_pool);
+ if (DBUG_EVALUATE_IF("simulate_lack_of_pages", true, false)) {
+
+ block = NULL;
+
+ if (srv_debug_monitor_printed)
+ DBUG_SET("-d,simulate_lack_of_pages");
+
+ } else if (DBUG_EVALUATE_IF("simulate_recovery_lack_of_pages",
+ recv_recovery_on, false)) {
+
+ block = NULL;
+
+ if (srv_debug_monitor_printed)
+ DBUG_SUICIDE();
+ } else {
+
+ block = buf_LRU_get_free_only(buf_pool);
+ }
if (block) {
@@ -1371,6 +1454,9 @@ loop:
return(block);
}
+ if (!started_ms)
+ started_ms = ut_time_ms();
+
if (srv_empty_free_list_algorithm == SRV_EMPTY_FREE_LIST_BACKOFF
&& buf_lru_manager_is_active
&& (srv_shutdown_state == SRV_SHUTDOWN_NONE
@@ -1408,11 +1494,17 @@ loop:
: FREE_LIST_BACKOFF_LOW_PRIO_DIVIDER));
}
- /* In case of backoff, do not ever attempt single page flushes
- and wait for the cleaner to free some pages instead. */
+ buf_LRU_handle_lack_of_free_blocks(n_iterations, started_ms,
+ flush_failures,
+ &mon_value_was,
+ &started_monitor);
n_iterations++;
+ srv_stats.buf_pool_wait_free.add(n_iterations, 1);
+
+ /* In case of backoff, do not ever attempt single page flushes
+ and wait for the cleaner to free some pages instead. */
goto loop;
} else {
@@ -1444,6 +1536,12 @@ loop:
mutex_exit(&buf_pool->flush_state_mutex);
+ if (DBUG_EVALUATE_IF("simulate_recovery_lack_of_pages", true, false)
+ || DBUG_EVALUATE_IF("simulate_lack_of_pages", true, false)) {
+
+ buf_pool->try_LRU_scan = false;
+ }
+
freed = FALSE;
if (buf_pool->try_LRU_scan || n_iterations > 0) {
@@ -1469,41 +1567,9 @@ loop:
}
- if (n_iterations > 20) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Warning: difficult to find free blocks in\n"
- "InnoDB: the buffer pool (%lu search iterations)!\n"
- "InnoDB: %lu failed attempts to flush a page!"
- " Consider\n"
- "InnoDB: increasing the buffer pool size.\n"
- "InnoDB: It is also possible that"
- " in your Unix version\n"
- "InnoDB: fsync is very slow, or"
- " completely frozen inside\n"
- "InnoDB: the OS kernel. Then upgrading to"
- " a newer version\n"
- "InnoDB: of your operating system may help."
- " Look at the\n"
- "InnoDB: number of fsyncs in diagnostic info below.\n"
- "InnoDB: Pending flushes (fsync) log: %lu;"
- " buffer pool: %lu\n"
- "InnoDB: %lu OS file reads, %lu OS file writes,"
- " %lu OS fsyncs\n"
- "InnoDB: Starting InnoDB Monitor to print further\n"
- "InnoDB: diagnostics to the standard output.\n",
- (ulong) n_iterations,
- (ulong) flush_failures,
- (ulong) fil_n_pending_log_flushes,
- (ulong) fil_n_pending_tablespace_flushes,
- (ulong) os_n_file_reads, (ulong) os_n_file_writes,
- (ulong) os_n_fsyncs);
-
- mon_value_was = srv_print_innodb_monitor;
- started_monitor = TRUE;
- srv_print_innodb_monitor = TRUE;
- os_event_set(srv_monitor_event);
- }
+ buf_LRU_handle_lack_of_free_blocks(n_iterations, started_ms,
+ flush_failures, &mon_value_was,
+ &started_monitor);
/* If we have scanned the whole LRU and still are unable to
find a free block then we should sleep here to let the
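
The buf0lru.cc hunks above move the "difficult to find free blocks" warning into
buf_LRU_handle_lack_of_free_blocks(), which prints only once the search has been
running for two seconds and then at most once every two seconds. The standalone
C++ sketch below shows just that rate limiting; now_ms() and
maybe_warn_no_free_blocks() are illustrative names, not InnoDB functions.

    #include <chrono>
    #include <cstdio>

    static unsigned long now_ms()
    {
        using namespace std::chrono;
        return (unsigned long) duration_cast<milliseconds>(
            steady_clock::now().time_since_epoch()).count();
    }

    static void maybe_warn_no_free_blocks(unsigned long started_ms,
                                          unsigned long n_iterations)
    {
        static unsigned long last_printout_ms = 0;
        const unsigned long current_ms = now_ms();

        // Warn only after 2 seconds of searching, at most every 2 seconds.
        if (current_ms > started_ms + 2000
            && current_ms > last_printout_ms + 2000) {
            std::fprintf(stderr,
                         "Warning: difficult to find free blocks "
                         "(%lu search iterations)\n", n_iterations);
            last_printout_ms = current_ms;
        }
    }

    int main()
    {
        // Pretend the search started 3 seconds ago; the 2-second limiter
        // suppresses repeated warnings from this burst of calls.
        const unsigned long started = now_ms() - 3000;
        for (unsigned long i = 0; i < 5; i++) {
            maybe_warn_no_free_blocks(started, i);
        }
        return 0;
    }
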
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 83e950b9334..13dd1d0c26c 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -6178,7 +6178,6 @@ dict_set_corrupted(
row_mysql_lock_data_dictionary(trx);
}
- ut_ad(index);
ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(!dict_table_is_comp(dict_sys->sys_tables));
ut_ad(!dict_table_is_comp(dict_sys->sys_indexes));
diff --git a/storage/xtradb/dict/dict0stats.cc b/storage/xtradb/dict/dict0stats.cc
index a4aa43651f8..c0a83c951a5 100644
--- a/storage/xtradb/dict/dict0stats.cc
+++ b/storage/xtradb/dict/dict0stats.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2009, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2009, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1109,7 +1109,8 @@ dict_stats_analyze_index_level(
them away) which brings non-determinism. We skip only
leaf-level delete marks because delete marks on
non-leaf level do not make sense. */
- if (level == 0 &&
+
+ if (level == 0 && srv_stats_include_delete_marked? 0:
rec_get_deleted_flag(
rec,
page_is_comp(btr_pcur_get_page(&pcur)))) {
@@ -1295,8 +1296,12 @@ enum page_scan_method_t {
the given page and count the number of
distinct ones, also ignore delete marked
records */
- QUIT_ON_FIRST_NON_BORING/* quit when the first record that differs
+ QUIT_ON_FIRST_NON_BORING,/* quit when the first record that differs
from its right neighbor is found */
+ COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED/* scan all records on
+ the given page and count the number of
+ distinct ones, include delete marked
+ records */
};
/* @} */
@@ -1572,6 +1577,8 @@ dict_stats_analyze_index_below_cur(
offsets_rec = dict_stats_scan_page(
&rec, offsets1, offsets2, index, page, n_prefix,
+ srv_stats_include_delete_marked ?
+ COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED:
COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff,
n_external_pages);
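
The dict0stats.cc hunks above make persistent statistics honour
srv_stats_include_delete_marked by selecting a scan method that also counts
delete-marked records. A reduced C++ sketch of that flag-driven selection follows;
the enum and variable names are simplified stand-ins for the real ones.

    #include <cstdio>

    enum scan_method {
        COUNT_SKIP_DEL_MARKED,
        COUNT_INCLUDE_DEL_MARKED
    };

    // Simplified stand-in for the new innodb_stats_include_delete_marked sysvar.
    static bool stats_include_delete_marked = true;

    static scan_method pick_scan_method()
    {
        return stats_include_delete_marked
            ? COUNT_INCLUDE_DEL_MARKED
            : COUNT_SKIP_DEL_MARKED;
    }

    int main()
    {
        std::printf("counting delete-marked records: %s\n",
                    pick_scan_method() == COUNT_INCLUDE_DEL_MARKED ? "yes" : "no");
        return 0;
    }
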
diff --git a/storage/xtradb/dict/dict0stats_bg.cc b/storage/xtradb/dict/dict0stats_bg.cc
index 6f01c379776..b3c8ef018f7 100644
--- a/storage/xtradb/dict/dict0stats_bg.cc
+++ b/storage/xtradb/dict/dict0stats_bg.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,8 +40,9 @@ Created Apr 25, 2012 Vasil Dimov
#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE)
-/** Event to wake up the stats thread */
-UNIV_INTERN os_event_t dict_stats_event = NULL;
+/** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add()
+or shutdown. Not protected by any mutex. */
+UNIV_INTERN os_event_t dict_stats_event;
/** This mutex protects the "recalc_pool" variable. */
static ib_mutex_t recalc_pool_mutex;
diff --git a/storage/xtradb/dyn/dyn0dyn.cc b/storage/xtradb/dyn/dyn0dyn.cc
index 3ef5297a7c9..dd1f6863c14 100644
--- a/storage/xtradb/dyn/dyn0dyn.cc
+++ b/storage/xtradb/dyn/dyn0dyn.cc
@@ -40,7 +40,6 @@ dyn_array_add_block(
mem_heap_t* heap;
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
if (arr->heap == NULL) {
diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc
index a7b0377d2a4..1a866a693ca 100644
--- a/storage/xtradb/fil/fil0fil.cc
+++ b/storage/xtradb/fil/fil0fil.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -4993,15 +4994,12 @@ fil_extend_space_to_desired_size(
byte* buf;
ulint buf_size;
ulint start_page_no;
- ulint file_start_page_no;
ulint page_size;
- ulint pages_added;
ibool success;
ut_ad(!srv_read_only_mode);
retry:
- pages_added = 0;
success = TRUE;
fil_mutex_enter_and_prepare_for_io(space_id);
@@ -5055,27 +5053,33 @@ retry:
mutex_exit(&fil_system->mutex);
start_page_no = space->size;
- file_start_page_no = space->size - node->size;
-
+ const ulint file_start_page_no = space->size - node->size;
#ifdef HAVE_POSIX_FALLOCATE
if (srv_use_posix_fallocate) {
- os_offset_t start_offset = start_page_no * page_size;
- os_offset_t n_pages = (size_after_extend - start_page_no);
- os_offset_t len = n_pages * page_size;
-
- if (posix_fallocate(node->handle, start_offset, len) == -1) {
- ib_logf(IB_LOG_LEVEL_ERROR, "preallocating file "
- "space for file \'%s\' failed. Current size "
- INT64PF ", desired size " INT64PF,
- node->name, start_offset, len+start_offset);
- os_file_handle_error_no_exit(node->name, "posix_fallocate", FALSE);
- success = FALSE;
- } else {
- success = TRUE;
+ const os_offset_t start_offset
+ = os_offset_t(start_page_no - file_start_page_no)
+ * page_size;
+ const ulint n_pages
+ = size_after_extend - start_page_no;
+ const os_offset_t len = os_offset_t(n_pages) * page_size;
+
+ int err;
+ do {
+ err = posix_fallocate(node->handle, start_offset, len);
+ } while (err == EINTR
+ && srv_shutdown_state == SRV_SHUTDOWN_NONE);
+
+ success = !err;
+ if (!success) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "extending file %s"
+ " from " INT64PF " to " INT64PF " bytes"
+ " failed with error %d",
+ node->name, start_offset, len + start_offset,
+ err);
}
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
- success = FALSE; errno = 28;os_has_said_disk_full = TRUE;);
+ success = FALSE; os_has_said_disk_full = TRUE;);
mutex_enter(&fil_system->mutex);
@@ -5095,14 +5099,24 @@ retry:
}
#endif
+#ifdef _WIN32
+ /* Write 1 page of zeroes at the desired end. */
+ start_page_no = size_after_extend - 1;
+ buf_size = page_size;
+#else
/* Extend at most 64 pages at a time */
buf_size = ut_min(64, size_after_extend - start_page_no) * page_size;
- buf2 = static_cast<byte*>(mem_alloc(buf_size + page_size));
+#endif
+ buf2 = static_cast<byte*>(calloc(1, buf_size + page_size));
+ if (!buf2) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "Cannot allocate " ULINTPF
+ " bytes to extend file",
+ buf_size + page_size);
+ success = FALSE;
+ }
buf = static_cast<byte*>(ut_align(buf2, page_size));
- memset(buf, 0, buf_size);
-
- while (start_page_no < size_after_extend) {
+ while (success && start_page_no < size_after_extend) {
ulint n_pages
= ut_min(buf_size / page_size,
size_after_extend - start_page_no);
@@ -5111,55 +5125,47 @@ retry:
= ((os_offset_t) (start_page_no - file_start_page_no))
* page_size;
- const char* name = node->name == NULL ? space->name : node->name;
-
#ifdef UNIV_HOTBACKUP
- success = os_file_write(name, node->handle, buf,
+ success = os_file_write(node->name, node->handle, buf,
offset, page_size * n_pages);
#else
success = os_aio(OS_FILE_WRITE, OS_AIO_SYNC,
- name, node->handle, buf,
+ node->name, node->handle, buf,
offset, page_size * n_pages,
NULL, NULL, space_id, NULL);
#endif /* UNIV_HOTBACKUP */
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
- success = FALSE; errno = 28; os_has_said_disk_full = TRUE;);
-
- if (success) {
- os_has_said_disk_full = FALSE;
- } else {
- /* Let us measure the size of the file to determine
- how much we were able to extend it */
- os_offset_t size;
-
- size = os_file_get_size(node->handle);
- ut_a(size != (os_offset_t) -1);
+ success = FALSE; os_has_said_disk_full = TRUE;);
- n_pages = ((ulint) (size / page_size))
- - node->size - pages_added;
+ /* Let us measure the size of the file to determine
+ how much we were able to extend it */
+ os_offset_t size = os_file_get_size(node->handle);
+ ut_a(size != (os_offset_t) -1);
- pages_added += n_pages;
- break;
- }
-
- start_page_no += n_pages;
- pages_added += n_pages;
+ start_page_no = (ulint) (size / page_size)
+ + file_start_page_no;
}
- mem_free(buf2);
+ free(buf2);
mutex_enter(&fil_system->mutex);
ut_a(node->being_extended);
+ ut_a(start_page_no - file_start_page_no >= node->size);
- space->size += pages_added;
- node->size += pages_added;
+ if (buf) {
+ ulint file_size = start_page_no - file_start_page_no;
+ space->size += file_size - node->size;
+ node->size = file_size;
+ }
fil_node_complete_io(node, fil_system, OS_FILE_WRITE);
/* At this point file has been extended */
+#ifdef HAVE_POSIX_FALLOCATE
file_extended:
+#endif /* HAVE_POSIX_FALLOCATE */
node->being_extended = FALSE;
*actual_size = space->size;
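
The fil0fil.cc rewrite above retries posix_fallocate() while it reports EINTR and
logs the returned error number rather than errno. The sketch below shows the same
retry loop on a plain POSIX file descriptor; it assumes a POSIX system, and the
demo file name is made up (the real code also gives up once shutdown has started).

    #include <cerrno>
    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    // posix_fallocate() returns the error number directly instead of setting
    // errno; EINTR means the call was interrupted and can simply be retried.
    static int extend_file(int fd, off_t start_offset, off_t len)
    {
        int err;
        do {
            err = posix_fallocate(fd, start_offset, len);
        } while (err == EINTR);

        if (err != 0) {
            std::fprintf(stderr, "extending file failed with error %d\n", err);
        }
        return err;
    }

    int main()
    {
        int fd = open("demo_extend.dat", O_CREAT | O_RDWR, 0644);
        if (fd < 0) {
            std::perror("open");
            return 1;
        }
        const int err = extend_file(fd, 0, 1 << 20);   // preallocate 1 MiB
        close(fd);
        return err == 0 ? 0 : 1;
    }
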
diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc
index 0bdb2ea0d4c..9292f84ab02 100644
--- a/storage/xtradb/fsp/fsp0fsp.cc
+++ b/storage/xtradb/fsp/fsp0fsp.cc
@@ -132,7 +132,7 @@ fsp_fill_free_list(
ulint space, /*!< in: space */
fsp_header_t* header, /*!< in/out: space header */
mtr_t* mtr) /*!< in/out: mini-transaction */
- UNIV_COLD MY_ATTRIBUTE((nonnull));
+ UNIV_COLD;
/**********************************************************************//**
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize file space
@@ -161,7 +161,7 @@ fseg_alloc_free_page_low(
in which the page should be initialized.
If init_mtr!=mtr, but the page is already
latched in mtr, do not initialize the page. */
- MY_ATTRIBUTE((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -1067,8 +1067,6 @@ fsp_fill_free_list(
ulint i;
mtr_t ibuf_mtr;
- ut_ad(header != NULL);
- ut_ad(mtr != NULL);
ut_ad(page_offset(header) == FSP_HEADER_OFFSET);
/* Check if we can fill free list from above the free list limit */
@@ -1331,7 +1329,7 @@ Allocates a single free page from a space. The page is marked as used.
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
buf_block_t*
fsp_alloc_free_page(
/*================*/
@@ -1351,9 +1349,6 @@ fsp_alloc_free_page(
ulint page_no;
ulint space_size;
- ut_ad(mtr);
- ut_ad(init_mtr);
-
header = fsp_get_space_header(space, zip_size, mtr);
/* Get the hinted descriptor */
@@ -2372,7 +2367,6 @@ fseg_alloc_free_page_low(
ibool success;
ulint n;
- ut_ad(mtr);
ut_ad((direction >= FSP_UP) && (direction <= FSP_NO_DIR));
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
@@ -2810,6 +2804,7 @@ try_again:
}
} else {
ut_a(alloc_type == FSP_CLEANING);
+ reserve = 0;
}
success = fil_space_reserve_free_extents(space, n_free, n_ext);
diff --git a/storage/xtradb/fts/fts0fts.cc b/storage/xtradb/fts/fts0fts.cc
index 4c54afae8cd..c1f0b0bd5fe 100644
--- a/storage/xtradb/fts/fts0fts.cc
+++ b/storage/xtradb/fts/fts0fts.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2016, MariaDB Corporation. All Rights reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1934,6 +1935,8 @@ func_exit:
/*************************************************************//**
Wrapper function of fts_create_index_tables_low(), create auxiliary
tables for an FTS index
+
+@see row_merge_create_fts_sort_index()
@return: DB_SUCCESS or error code */
static
dict_table_t*
@@ -1965,13 +1968,12 @@ fts_create_one_index_table(
(int)(field->col->prtype & DATA_MYSQL_TYPE_MASK),
(uint) dtype_get_charset_coll(field->col->prtype));
- if (strcmp(charset->name, "latin1_swedish_ci") == 0) {
- dict_mem_table_add_col(new_table, heap, "word", DATA_VARCHAR,
- field->col->prtype, FTS_MAX_WORD_LEN);
- } else {
- dict_mem_table_add_col(new_table, heap, "word", DATA_VARMYSQL,
- field->col->prtype, FTS_MAX_WORD_LEN);
- }
+ dict_mem_table_add_col(new_table, heap, "word",
+ charset == &my_charset_latin1
+ ? DATA_VARCHAR : DATA_VARMYSQL,
+ field->col->prtype,
+ FTS_MAX_WORD_LEN_IN_CHAR
+ * DATA_MBMAXLEN(field->col->mbminmaxlen));
dict_mem_table_add_col(new_table, heap, "first_doc_id", DATA_INT,
DATA_NOT_NULL | DATA_UNSIGNED,
diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc
index ea937c20752..cb30122adcb 100644
--- a/storage/xtradb/fts/fts0opt.cc
+++ b/storage/xtradb/fts/fts0opt.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2016, MariaDB Corporation. All Rights reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -281,7 +282,7 @@ fts_zip_initialize(
zip->last_big_block = 0;
zip->word.f_len = 0;
- memset(zip->word.f_str, 0, FTS_MAX_WORD_LEN);
+ *zip->word.f_str = 0;
ib_vector_reset(zip->blocks);
@@ -578,9 +579,6 @@ fts_zip_read_word(
fts_zip_t* zip, /*!< in: Zip state + data */
fts_string_t* word) /*!< out: uncompressed word */
{
-#ifdef UNIV_DEBUG
- ulint i;
-#endif
short len = 0;
void* null = NULL;
byte* ptr = word->f_str;
@@ -655,10 +653,9 @@ fts_zip_read_word(
}
}
-#ifdef UNIV_DEBUG
/* All blocks must be freed at end of inflate. */
if (zip->status != Z_OK) {
- for (i = 0; i < ib_vector_size(zip->blocks); ++i) {
+ for (ulint i = 0; i < ib_vector_size(zip->blocks); ++i) {
if (ib_vector_getp(zip->blocks, i)) {
ut_free(ib_vector_getp(zip->blocks, i));
ib_vector_set(zip->blocks, i, &null);
@@ -669,7 +666,6 @@ fts_zip_read_word(
if (ptr != NULL) {
ut_ad(word->f_len == strlen((char*) ptr));
}
-#endif /* UNIV_DEBUG */
return(zip->status == Z_OK || zip->status == Z_STREAM_END ? ptr : NULL);
}
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index ea94f90cbf8..9cb7d77ca84 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -1556,6 +1556,7 @@ static bool innobase_purge_archive_logs(
}
#endif
+
/*************************************************************//**
Check for a valid value of innobase_commit_concurrency.
@return 0 for valid innodb_commit_concurrency */
@@ -4069,19 +4070,13 @@ innobase_change_buffering_inited_ok:
and consequently we do not need to know the ordering internally in
InnoDB. */
- ut_a(0 == strcmp(my_charset_latin1.name, "latin1_swedish_ci"));
srv_latin1_ordering = my_charset_latin1.sort_order;
innobase_commit_concurrency_init_default();
-#ifndef EXTENDED_FOR_KILLIDLE
- srv_kill_idle_transaction = 0;
-#endif
-
#ifdef HAVE_POSIX_FALLOCATE
srv_use_posix_fallocate = (ibool) innobase_use_fallocate;
#endif
-
/* Do not enable backoff algorithm for small buffer pool. */
if (!innodb_empty_free_list_algorithm_backoff_allowed(
static_cast<srv_empty_free_list_t>(
@@ -7080,18 +7075,16 @@ get_innobase_type_from_mysql_type(
case MYSQL_TYPE_VARCHAR: /* new >= 5.0.3 true VARCHAR */
if (field->binary()) {
return(DATA_BINARY);
- } else if (strcmp(field->charset()->name,
- "latin1_swedish_ci") == 0) {
+ } else if (field->charset() == &my_charset_latin1) {
return(DATA_VARCHAR);
} else {
return(DATA_VARMYSQL);
}
case MYSQL_TYPE_BIT:
- case MYSQL_TYPE_STRING: if (field->binary()) {
-
+ case MYSQL_TYPE_STRING:
+ if (field->binary()) {
return(DATA_FIXBINARY);
- } else if (strcmp(field->charset()->name,
- "latin1_swedish_ci") == 0) {
+ } else if (field->charset() == &my_charset_latin1) {
return(DATA_CHAR);
} else {
return(DATA_MYSQL);
@@ -13543,9 +13536,13 @@ ha_innobase::info_low(
/* If this table is already queued for
background analyze, remove it from the
queue as we are about to do the same */
- dict_mutex_enter_for_mysql();
- dict_stats_recalc_pool_del(ib_table);
- dict_mutex_exit_for_mysql();
+ if (!srv_read_only_mode) {
+
+ dict_mutex_enter_for_mysql();
+ dict_stats_recalc_pool_del(
+ ib_table);
+ dict_mutex_exit_for_mysql();
+ }
opt = DICT_STATS_RECALC_PERSISTENT;
} else {
@@ -16050,6 +16047,37 @@ ha_innobase::get_auto_increment(
ulonglong col_max_value = innobase_get_int_col_max_value(
table->next_number_field);
+ /** The following logic is needed to avoid duplicate key error
+ for autoincrement column.
+
+ (1) InnoDB gives the current autoincrement value with respect
+ to increment and offset value.
+
+ (2) Basically it does compute_next_insert_id() logic inside InnoDB
+ to avoid the current auto increment value changed by handler layer.
+
+ (3) It is restricted only for insert operations. */
+
+ if (increment > 1 && thd_sql_command(user_thd) != SQLCOM_ALTER_TABLE
+ && autoinc < col_max_value) {
+
+ ulonglong prev_auto_inc = autoinc;
+
+ autoinc = ((autoinc - 1) + increment - offset)/ increment;
+
+ autoinc = autoinc * increment + offset;
+
+ /* If autoinc exceeds the col_max_value then reset
+ to old autoinc value. Because in case of non-strict
+ sql mode, boundary value is not considered as error. */
+
+ if (autoinc >= col_max_value) {
+ autoinc = prev_auto_inc;
+ }
+
+ ut_ad(autoinc > 0);
+ }
+
/* Called for the first time ? */
if (trx->n_autoinc_rows == 0) {
@@ -18524,32 +18552,6 @@ innobase_fts_retrieve_ranking(
}
/***********************************************************************
-functions for kill session of idle transaction */
-ibool
-innobase_thd_is_idle(
-/*=================*/
- const void* thd) /*!< in: thread handle (THD*) */
-{
-#ifdef EXTENDED_FOR_KILLIDLE
- return(thd_command((const THD*) thd) == COM_SLEEP);
-#else
- return(FALSE);
-#endif
-}
-
-ib_int64_t
-innobase_thd_get_start_time(
-/*========================*/
- const void* thd) /*!< in: thread handle (THD*) */
-{
-#ifdef EXTENDED_FOR_KILLIDLE
- return((ib_int64_t)thd_start_time((const THD*) thd));
-#else
- return(0); /*dummy value*/
-#endif
-}
-
-/***********************************************************************
Free the memory for the FTS handler */
UNIV_INTERN
void
@@ -18568,19 +18570,6 @@ innobase_fts_close_ranking(
return;
}
-UNIV_INTERN
-void
-innobase_thd_kill(
-/*==============*/
- ulong thd_id)
-{
-#ifdef EXTENDED_FOR_KILLIDLE
- thd_kill(thd_id);
-#else
- return;
-#endif
-}
-
/***********************************************************************
Find and Retrieve the FTS Relevance Ranking result for doc with doc_id
of prebuilt->fts_doc_id
@@ -18778,16 +18767,6 @@ innobase_fts_retrieve_docid(
}
-ulong
-innobase_thd_get_thread_id(
-/*=======================*/
- const void* thd)
-{
- return(thd_get_thread_id((const THD*) thd));
-}
-
-
-
/***********************************************************************
Find and retrieve the size of the current result
@return number of matching rows */
@@ -19389,6 +19368,12 @@ static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite,
"Disable with --skip-innodb-doublewrite.",
NULL, NULL, TRUE);
+static MYSQL_SYSVAR_BOOL(stats_include_delete_marked,
+ srv_stats_include_delete_marked,
+ PLUGIN_VAR_OPCMDARG,
+ "Scan delete marked records for persistent stat",
+ NULL, NULL, FALSE);
+
static MYSQL_SYSVAR_BOOL(use_atomic_writes, innobase_use_atomic_writes,
PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
"Prevent partial page writes, via atomic writes (beta). "
@@ -20031,13 +20016,6 @@ static MYSQL_SYSVAR_ULONG(force_recovery, srv_force_recovery,
"Helps to save your data in case the disk image of the database becomes corrupt.",
NULL, NULL, 0, 0, 6, 0);
-#ifndef DBUG_OFF
-static MYSQL_SYSVAR_ULONG(force_recovery_crash, srv_force_recovery_crash,
- PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
- "Kills the server during crash recovery.",
- NULL, NULL, 0, 0, 10, 0);
-#endif /* !DBUG_OFF */
-
static MYSQL_SYSVAR_ULONG(page_size, srv_page_size,
PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
"Page size to use for all InnoDB tablespaces.",
@@ -20485,6 +20463,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(data_file_path),
MYSQL_SYSVAR(data_home_dir),
MYSQL_SYSVAR(doublewrite),
+ MYSQL_SYSVAR(stats_include_delete_marked),
MYSQL_SYSVAR(api_enable_binlog),
MYSQL_SYSVAR(api_enable_mdl),
MYSQL_SYSVAR(api_disable_rowlock),
@@ -20503,9 +20482,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(use_global_flush_log_at_trx_commit),
MYSQL_SYSVAR(flush_method),
MYSQL_SYSVAR(force_recovery),
-#ifndef DBUG_OFF
- MYSQL_SYSVAR(force_recovery_crash),
-#endif /* !DBUG_OFF */
MYSQL_SYSVAR(ft_cache_size),
MYSQL_SYSVAR(ft_total_cache_size),
MYSQL_SYSVAR(ft_result_cache_limit),
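
Among the ha_innodb.cc changes above, get_auto_increment() now rounds the counter
up to the next value of the form offset + k*increment before the handler layer
sees it, and falls back to the previous value if the rounded one would exceed the
column maximum. The standalone sketch below reproduces only that arithmetic with a
worked example; edge cases such as an offset larger than the increment or a
counter of zero are ignored here.

    #include <cstdio>

    static unsigned long long next_autoinc(unsigned long long autoinc,
                                           unsigned long long increment,
                                           unsigned long long offset,
                                           unsigned long long col_max_value)
    {
        if (increment <= 1 || autoinc >= col_max_value) {
            return autoinc;
        }
        const unsigned long long prev = autoinc;

        // Round up to the next value congruent to offset modulo increment.
        autoinc = ((autoinc - 1) + increment - offset) / increment;
        autoinc = autoinc * increment + offset;

        if (autoinc >= col_max_value) {
            autoinc = prev;   // keep the old value near the column boundary
        }
        return autoinc;
    }

    int main()
    {
        // With increment=10 and offset=5, a counter of 42 rounds up to 45,
        // the next value congruent to 5 modulo 10.
        std::printf("%llu\n", next_autoinc(42, 10, 5, 1ULL << 32));
        return 0;
    }
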
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index 672894f6f51..da2f0f877f4 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -148,6 +148,8 @@ class ha_innobase: public handler
int index_first(uchar * buf);
int index_last(uchar * buf);
+ bool has_gap_locks() const { return true; }
+
int rnd_init(bool scan);
int rnd_end();
int rnd_next(uchar *buf);
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index 2eea4aa4a1f..4db33fab1a6 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -1843,6 +1843,7 @@ innobase_fts_check_doc_id_index_in_def(
return(FTS_NOT_EXIST_DOC_ID_INDEX);
}
+
/*******************************************************************//**
Create an index table where indexes are ordered as follows:
@@ -1911,26 +1912,11 @@ innobase_create_key_defs(
(only prefix/part of the column is indexed), MySQL will treat the
index as a PRIMARY KEY unless the table already has one. */
- if (n_add > 0 && !new_primary && got_default_clust
- && (key_info[*add].flags & HA_NOSAME)
- && !(key_info[*add].flags & HA_KEY_HAS_PART_KEY_SEG)) {
- uint key_part = key_info[*add].user_defined_key_parts;
-
- new_primary = true;
+ ut_ad(altered_table->s->primary_key == 0
+ || altered_table->s->primary_key == MAX_KEY);
- while (key_part--) {
- const uint maybe_null
- = key_info[*add].key_part[key_part].key_type
- & FIELDFLAG_MAYBE_NULL;
- DBUG_ASSERT(!maybe_null
- == !key_info[*add].key_part[key_part].
- field->real_maybe_null());
-
- if (maybe_null) {
- new_primary = false;
- break;
- }
- }
+ if (got_default_clust && !new_primary) {
+ new_primary = (altered_table->s->primary_key != MAX_KEY);
}
const bool rebuild = new_primary || add_fts_doc_id
@@ -1949,8 +1935,14 @@ innobase_create_key_defs(
ulint primary_key_number;
if (new_primary) {
- DBUG_ASSERT(n_add > 0);
- primary_key_number = *add;
+ if (n_add == 0) {
+ DBUG_ASSERT(got_default_clust);
+ DBUG_ASSERT(altered_table->s->primary_key
+ == 0);
+ primary_key_number = 0;
+ } else {
+ primary_key_number = *add;
+ }
} else if (got_default_clust) {
/* Create the GEN_CLUST_INDEX */
index_def_t* index = indexdef++;
@@ -3062,6 +3054,8 @@ prepare_inplace_alter_table_dict(
ctx->add_cols = add_cols;
} else {
DBUG_ASSERT(!innobase_need_rebuild(ha_alter_info, old_table));
+ DBUG_ASSERT(old_table->s->primary_key
+ == altered_table->s->primary_key);
if (!ctx->new_table->fts
&& innobase_fulltext_exist(altered_table)) {
@@ -4091,6 +4085,27 @@ found_col:
add_fts_doc_id_idx, prebuilt));
}
+/** Get the name of an erroneous key.
+@param[in]	error_key_num	InnoDB number of the erroneous key
+@param[in] ha_alter_info changes that were being performed
+@param[in] table InnoDB table
+@return the name of the erroneous key */
+static
+const char*
+get_error_key_name(
+ ulint error_key_num,
+ const Alter_inplace_info* ha_alter_info,
+ const dict_table_t* table)
+{
+ if (error_key_num == ULINT_UNDEFINED) {
+ return(FTS_DOC_ID_INDEX_NAME);
+ } else if (ha_alter_info->key_count == 0) {
+ return(dict_table_get_first_index(table)->name);
+ } else {
+ return(ha_alter_info->key_info_buffer[error_key_num].name);
+ }
+}
+
/** Alter the table structure in-place with operations
specified using Alter_inplace_info.
The level of concurrency allowed during this operation depends
@@ -4208,17 +4223,13 @@ oom:
case DB_ONLINE_LOG_TOO_BIG:
DBUG_ASSERT(ctx->online);
my_error(ER_INNODB_ONLINE_LOG_TOO_BIG, MYF(0),
- (prebuilt->trx->error_key_num == ULINT_UNDEFINED)
- ? FTS_DOC_ID_INDEX_NAME
- : ha_alter_info->key_info_buffer[
- prebuilt->trx->error_key_num].name);
+ get_error_key_name(prebuilt->trx->error_key_num,
+ ha_alter_info, prebuilt->table));
break;
case DB_INDEX_CORRUPT:
my_error(ER_INDEX_CORRUPT, MYF(0),
- (prebuilt->trx->error_key_num == ULINT_UNDEFINED)
- ? FTS_DOC_ID_INDEX_NAME
- : ha_alter_info->key_info_buffer[
- prebuilt->trx->error_key_num].name);
+ get_error_key_name(prebuilt->trx->error_key_num,
+ ha_alter_info, prebuilt->table));
break;
default:
my_error_innodb(error,
@@ -5031,7 +5042,6 @@ innobase_update_foreign_cache(
"Foreign key constraints for table '%s'"
" are loaded with charset check off",
user_table->name);
-
}
}
@@ -5131,14 +5141,13 @@ commit_try_rebuild(
DBUG_RETURN(true);
case DB_ONLINE_LOG_TOO_BIG:
my_error(ER_INNODB_ONLINE_LOG_TOO_BIG, MYF(0),
- ha_alter_info->key_info_buffer[0].name);
+ get_error_key_name(err_key, ha_alter_info,
+ rebuilt_table));
DBUG_RETURN(true);
case DB_INDEX_CORRUPT:
my_error(ER_INDEX_CORRUPT, MYF(0),
- (err_key == ULINT_UNDEFINED)
- ? FTS_DOC_ID_INDEX_NAME
- : ha_alter_info->key_info_buffer[err_key]
- .name);
+ get_error_key_name(err_key, ha_alter_info,
+ rebuilt_table));
DBUG_RETURN(true);
default:
my_error_innodb(error, table_name, user_table->flags);
diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc
index 59cad1c2e7a..f2686f1049b 100644
--- a/storage/xtradb/handler/i_s.cc
+++ b/storage/xtradb/handler/i_s.cc
@@ -8343,29 +8343,7 @@ i_s_innodb_changed_pages_fill(
while(log_online_bitmap_iterator_next(&i) &&
(!srv_max_changed_pages ||
- output_rows_num < srv_max_changed_pages) &&
- /*
- There is no need to compare both start LSN and end LSN fields
- with maximum value. It's enough to compare only start LSN.
- Example:
-
- max_lsn = 100
- \\\\\\\\\\\\\\\\\\\\\\\\\|\\\\\\\\ - Query 1
- I------I I-------I I-------------I I----I
- ////////////////// | - Query 2
- 1 2 3 4
-
- Query 1:
- SELECT * FROM INNODB_CHANGED_PAGES WHERE start_lsn < 100
- will select 1,2,3 bitmaps
- Query 2:
- SELECT * FROM INNODB_CHANGED_PAGES WHERE end_lsn < 100
- will select 1,2 bitmaps
-
- The condition start_lsn <= 100 will be false after reading
- 1,2,3 bitmaps which suits for both cases.
- */
- LOG_BITMAP_ITERATOR_START_LSN(i) <= max_lsn)
+ output_rows_num < srv_max_changed_pages))
{
if (!LOG_BITMAP_ITERATOR_PAGE_CHANGED(i))
continue;
diff --git a/storage/xtradb/include/btr0cur.h b/storage/xtradb/include/btr0cur.h
index f14288d0f89..19ab7d8bc3a 100644
--- a/storage/xtradb/include/btr0cur.h
+++ b/storage/xtradb/include/btr0cur.h
@@ -294,11 +294,7 @@ btr_cur_update_alloc_zip_func(
false=update-in-place */
mtr_t* mtr, /*!< in/out: mini-transaction */
trx_t* trx) /*!< in: NULL or transaction */
-#ifdef UNIV_DEBUG
- MY_ATTRIBUTE((nonnull (1, 2, 3, 4, 7), warn_unused_result));
-#else
- MY_ATTRIBUTE((nonnull (1, 2, 3, 6), warn_unused_result));
-#endif
+ MY_ATTRIBUTE((warn_unused_result));
#ifdef UNIV_DEBUG
# define btr_cur_update_alloc_zip(page_zip,cursor,index,offsets,len,cr,mtr,trx) \
@@ -428,7 +424,7 @@ btr_cur_del_mark_set_clust_rec(
const ulint* offsets,/*!< in: rec_get_offsets(rec) */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in/out: mini-transaction */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/***********************************************************//**
Sets a secondary index record delete mark to TRUE or FALSE.
@return DB_SUCCESS, DB_LOCK_WAIT, or error number */
@@ -441,7 +437,7 @@ btr_cur_del_mark_set_sec_rec(
ibool val, /*!< in: value to set */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in/out: mini-transaction */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*************************************************************//**
Tries to compress a page of the tree if it seems useful. It is assumed
that mtr holds an x-latch on the tree and on the cursor page. To avoid
@@ -609,8 +605,7 @@ btr_cur_disown_inherited_fields(
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
const upd_t* update, /*!< in: update vector */
- mtr_t* mtr) /*!< in/out: mini-transaction */
- MY_ATTRIBUTE((nonnull(2,3,4,5,6)));
+ mtr_t* mtr); /*!< in/out: mini-transaction */
/** Operation code for btr_store_big_rec_extern_fields(). */
enum blob_op {
@@ -655,7 +650,7 @@ btr_store_big_rec_extern_fields(
mtr_t* btr_mtr, /*!< in: mtr containing the
latches to the clustered index */
enum blob_op op) /*! in: operation code */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************************//**
Frees the space in an externally stored field to the file space
@@ -751,8 +746,7 @@ btr_push_update_extern_fields(
/*==========================*/
dtuple_t* tuple, /*!< in/out: data tuple */
const upd_t* update, /*!< in: update vector */
- mem_heap_t* heap) /*!< in: memory heap */
- MY_ATTRIBUTE((nonnull));
+ mem_heap_t* heap); /*!< in: memory heap */
/***********************************************************//**
Sets a secondary index record's delete mark to the given value. This
function is only used by the insert buffer merge mechanism. */
diff --git a/storage/xtradb/include/btr0sea.h b/storage/xtradb/include/btr0sea.h
index 8f438bf640e..bfe2c43defb 100644
--- a/storage/xtradb/include/btr0sea.h
+++ b/storage/xtradb/include/btr0sea.h
@@ -200,7 +200,7 @@ hash_table_t*
btr_search_get_hash_table(
/*======================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((pure,warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Returns the adaptive hash index latch for a given index key.
@@ -210,7 +210,7 @@ prio_rw_lock_t*
btr_search_get_latch(
/*=================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((pure,warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
Returns the AHI partition number corresponding to a given index ID. */
@@ -227,8 +227,7 @@ UNIV_INLINE
void
btr_search_index_init(
/*===============*/
- dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull));
+ dict_index_t* index); /*!< in: index */
/********************************************************************//**
Latches all adaptive hash index latches in exclusive mode. */
diff --git a/storage/xtradb/include/btr0sea.ic b/storage/xtradb/include/btr0sea.ic
index 3cbcff75f31..e963d8a8449 100644
--- a/storage/xtradb/include/btr0sea.ic
+++ b/storage/xtradb/include/btr0sea.ic
@@ -90,7 +90,6 @@ btr_search_get_hash_table(
/*======================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->search_table);
return(index->search_table);
@@ -105,7 +104,6 @@ btr_search_get_latch(
/*=================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->search_latch >= btr_search_latch_arr &&
index->search_latch < btr_search_latch_arr +
btr_search_index_num);
@@ -132,8 +130,6 @@ btr_search_index_init(
/*===============*/
dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
-
index->search_latch =
&btr_search_latch_arr[btr_search_get_key(index->id)];
index->search_table =
diff --git a/storage/xtradb/include/buf0buddy.ic b/storage/xtradb/include/buf0buddy.ic
index 9bc8e9e8762..a5fb510dd19 100644
--- a/storage/xtradb/include/buf0buddy.ic
+++ b/storage/xtradb/include/buf0buddy.ic
@@ -50,7 +50,7 @@ buf_buddy_alloc_low(
allocated from the LRU list and
buf_pool->LRU_list_mutex was
temporarily released */
- MY_ATTRIBUTE((malloc, nonnull));
+ MY_ATTRIBUTE((malloc));
/**********************************************************************//**
Deallocate a block. */
diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h
index 8110fbc4808..18c1cb4ff9d 100644
--- a/storage/xtradb/include/buf0buf.h
+++ b/storage/xtradb/include/buf0buf.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -242,8 +243,7 @@ buf_relocate(
buf_page_t* bpage, /*!< in/out: control block being relocated;
buf_page_get_state(bpage) must be
BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */
- buf_page_t* dpage) /*!< in/out: destination control block */
- MY_ATTRIBUTE((nonnull));
+ buf_page_t* dpage); /*!< in/out: destination control block */
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
@@ -737,7 +737,7 @@ buf_page_print(
ulint flags) /*!< in: 0 or
BUF_PAGE_PRINT_NO_CRASH or
BUF_PAGE_PRINT_NO_FULL */
- UNIV_COLD MY_ATTRIBUTE((nonnull));
+ UNIV_COLD;
/********************************************************************//**
Decompress a block.
@return TRUE if successful */
@@ -1949,7 +1949,10 @@ struct buf_pool_t{
os_event_t no_flush[BUF_FLUSH_N_TYPES];
/*!< this is in the set state
when there is no flush batch
- of the given type running */
+ of the given type running;
+ os_event_set() and os_event_reset()
+ are protected by
+ buf_pool_t::flush_state_mutex */
ib_rbt_t* flush_rbt; /*!< a red-black tree is used
exclusively during recovery to
speed up insertions in the
diff --git a/storage/xtradb/include/buf0dblwr.h b/storage/xtradb/include/buf0dblwr.h
index a62a6400d97..5582778825c 100644
--- a/storage/xtradb/include/buf0dblwr.h
+++ b/storage/xtradb/include/buf0dblwr.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -134,11 +135,13 @@ struct buf_dblwr_t{
ulint b_reserved;/*!< number of slots currently reserved
for batch flush. */
os_event_t b_event;/*!< event where threads wait for a
- batch flush to end. */
+ batch flush to end;
+ os_event_set() and os_event_reset()
+ are protected by buf_dblwr_t::mutex */
ulint s_reserved;/*!< number of slots currently
reserved for single page flushes. */
os_event_t s_event;/*!< event where threads wait for a
- single page flush slot. */
+ single page flush slot. Protected by mutex. */
bool* in_use; /*!< flag used to indicate if a slot is
in use. Only used for single page
flushes. */
diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h
index a85f6f1eef9..a2481fbad6f 100644
--- a/storage/xtradb/include/dict0dict.h
+++ b/storage/xtradb/include/dict0dict.h
@@ -747,7 +747,7 @@ ulint
dict_index_is_clust(
/*================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Check whether the index is unique.
@return nonzero for unique index, zero for other indexes */
@@ -756,7 +756,7 @@ ulint
dict_index_is_unique(
/*=================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Check whether the index is the insert buffer tree.
@return nonzero for insert buffer, zero for other indexes */
@@ -765,7 +765,7 @@ ulint
dict_index_is_ibuf(
/*===============*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Check whether the index is a secondary index or the insert buffer tree.
@return nonzero for insert buffer, zero for other indexes */
@@ -774,7 +774,7 @@ ulint
dict_index_is_sec_or_ibuf(
/*======================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************************
Gets the all the FTS indexes for the table. NOTE: must not be called for
@@ -796,7 +796,7 @@ ulint
dict_table_get_n_user_cols(
/*=======================*/
const dict_table_t* table) /*!< in: table */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Gets the number of system columns in a table in the dictionary cache.
@return number of system (e.g., ROW_ID) columns of a table */
@@ -815,7 +815,7 @@ ulint
dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Gets the approximately estimated number of rows in the table.
@return estimated number of rows */
@@ -1745,7 +1745,7 @@ ulint
dict_index_is_corrupted(
/*====================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -1758,7 +1758,7 @@ dict_set_corrupted(
dict_index_t* index, /*!< in/out: index */
trx_t* trx, /*!< in/out: transaction */
const char* ctx) /*!< in: context */
- UNIV_COLD MY_ATTRIBUTE((nonnull));
+ UNIV_COLD;
/**********************************************************************//**
Flags an index corrupted in the data dictionary cache only. This
@@ -1769,8 +1769,7 @@ void
dict_set_corrupted_index_cache_only(
/*================================*/
dict_index_t* index, /*!< in/out: index */
- dict_table_t* table) /*!< in/out: table */
- MY_ATTRIBUTE((nonnull));
+ dict_table_t* table); /*!< in/out: table */
/**********************************************************************//**
Flags a table with specified space_id corrupted in the table dictionary
diff --git a/storage/xtradb/include/dict0dict.ic b/storage/xtradb/include/dict0dict.ic
index 58a9ef4d65d..efc2c3f0e68 100644
--- a/storage/xtradb/include/dict0dict.ic
+++ b/storage/xtradb/include/dict0dict.ic
@@ -266,7 +266,6 @@ dict_index_is_clust(
/*================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_CLUSTERED);
@@ -280,7 +279,6 @@ dict_index_is_unique(
/*=================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_UNIQUE);
@@ -295,7 +293,6 @@ dict_index_is_ibuf(
/*===============*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_IBUF);
@@ -327,7 +324,6 @@ dict_index_is_sec_or_ibuf(
{
ulint type;
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
type = index->type;
@@ -345,7 +341,6 @@ dict_table_get_n_user_cols(
/*=======================*/
const dict_table_t* table) /*!< in: table */
{
- ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
return(table->n_cols - DATA_N_SYS_COLS);
@@ -377,7 +372,6 @@ dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
{
- ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
return(table->n_cols);
@@ -1373,7 +1367,6 @@ dict_index_is_corrupted(
/*====================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return((index->type & DICT_CORRUPT)
diff --git a/storage/xtradb/include/dict0mem.h b/storage/xtradb/include/dict0mem.h
index bbd05dae551..771864c89ba 100644
--- a/storage/xtradb/include/dict0mem.h
+++ b/storage/xtradb/include/dict0mem.h
@@ -138,7 +138,7 @@ allows InnoDB to update_create_info() accordingly. */
+ DICT_TF_WIDTH_DATA_DIR)
/** A mask of all the known/used bits in table flags */
-#define DICT_TF_BIT_MASK (~(~0 << DICT_TF_BITS))
+#define DICT_TF_BIT_MASK (~(~0U << DICT_TF_BITS))
/** Zero relative shift position of the COMPACT field */
#define DICT_TF_POS_COMPACT 0
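Note on the DICT_TF_BIT_MASK hunk above (the same change recurs for FSP_FLAGS_MASK further down): ~(~0 << N) is replaced by ~(~0U << N) because left-shifting the signed value ~0 (that is, -1) is undefined behaviour in C/C++, and the mask is now computed entirely in an unsigned type. A minimal standalone sketch of the idiom, using an illustrative width of 14 bits rather than the real DICT_TF_BITS:

	#include <stdio.h>

	/* Build an N-bit all-ones mask from an unsigned ~0U.  With a signed ~0
	   the shift would operate on -1, which is undefined behaviour. */
	#define BITS		14			/* illustrative width */
	#define BIT_MASK	(~(~0U << BITS))	/* 0x3fff for BITS == 14 */

	int main(void)
	{
		printf("mask = 0x%x\n", BIT_MASK);	/* prints mask = 0x3fff */
		return 0;
	}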
diff --git a/storage/xtradb/include/dict0stats_bg.h b/storage/xtradb/include/dict0stats_bg.h
index 82cd2b468b9..1973380d197 100644
--- a/storage/xtradb/include/dict0stats_bg.h
+++ b/storage/xtradb/include/dict0stats_bg.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -32,7 +33,8 @@ Created Apr 26, 2012 Vasil Dimov
#include "os0sync.h" /* os_event_t */
#include "os0thread.h" /* DECLARE_THREAD */
-/** Event to wake up the stats thread */
+/** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add()
+or shutdown. Not protected by any mutex. */
extern os_event_t dict_stats_event;
/*****************************************************************//**
diff --git a/storage/xtradb/include/dyn0dyn.h b/storage/xtradb/include/dyn0dyn.h
index 1bd10b6bf58..20963a1472b 100644
--- a/storage/xtradb/include/dyn0dyn.h
+++ b/storage/xtradb/include/dyn0dyn.h
@@ -46,9 +46,8 @@ UNIV_INLINE
dyn_array_t*
dyn_array_create(
/*=============*/
- dyn_array_t* arr) /*!< in/out memory buffer of
+ dyn_array_t* arr); /*!< in/out memory buffer of
size sizeof(dyn_array_t) */
- MY_ATTRIBUTE((nonnull));
/************************************************************//**
Frees a dynamic array. */
UNIV_INLINE
@@ -69,7 +68,7 @@ dyn_array_open(
dyn_array_t* arr, /*!< in: dynamic array */
ulint size) /*!< in: size in bytes of the buffer; MUST be
smaller than DYN_ARRAY_DATA_SIZE! */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
Closes the buffer returned by dyn_array_open. */
UNIV_INLINE
@@ -77,8 +76,7 @@ void
dyn_array_close(
/*============*/
dyn_array_t* arr, /*!< in: dynamic array */
- const byte* ptr) /*!< in: end of used space */
- MY_ATTRIBUTE((nonnull));
+ const byte* ptr); /*!< in: end of used space */
/*********************************************************************//**
Makes room on top of a dyn array and returns a pointer to
the added element. The caller must copy the element to
@@ -90,7 +88,7 @@ dyn_array_push(
/*===========*/
dyn_array_t* arr, /*!< in/out: dynamic array */
ulint size) /*!< in: size in bytes of the element */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
Returns pointer to an element in dyn array.
@return pointer to element */
@@ -101,7 +99,7 @@ dyn_array_get_element(
const dyn_array_t* arr, /*!< in: dyn array */
ulint pos) /*!< in: position of element
in bytes from array start */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
Returns the size of stored data in a dyn array.
@return data size in bytes */
@@ -110,7 +108,7 @@ ulint
dyn_array_get_data_size(
/*====================*/
const dyn_array_t* arr) /*!< in: dyn array */
- MY_ATTRIBUTE((nonnull, warn_unused_result, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
Gets the first block in a dyn array.
@param arr dyn array
@@ -144,7 +142,7 @@ ulint
dyn_block_get_used(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
- MY_ATTRIBUTE((nonnull, warn_unused_result, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Gets pointer to the start of data in a dyn array block.
@return pointer to data */
@@ -153,7 +151,7 @@ byte*
dyn_block_get_data(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
- MY_ATTRIBUTE((nonnull, warn_unused_result, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************//**
Pushes n bytes to a dyn array. */
UNIV_INLINE
diff --git a/storage/xtradb/include/dyn0dyn.ic b/storage/xtradb/include/dyn0dyn.ic
index f18f2e6dff9..6e97649245e 100644
--- a/storage/xtradb/include/dyn0dyn.ic
+++ b/storage/xtradb/include/dyn0dyn.ic
@@ -36,7 +36,7 @@ dyn_block_t*
dyn_array_add_block(
/*================*/
dyn_array_t* arr) /*!< in/out: dyn array */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Gets the number of used bytes in a dyn array block.
@@ -47,8 +47,6 @@ dyn_block_get_used(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
{
- ut_ad(block);
-
return((block->used) & ~DYN_BLOCK_FULL_FLAG);
}
@@ -76,7 +74,6 @@ dyn_array_create(
dyn_array_t* arr) /*!< in/out: memory buffer of
size sizeof(dyn_array_t) */
{
- ut_ad(arr);
#if DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG
# error "DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG"
#endif
@@ -119,7 +116,6 @@ dyn_array_push(
dyn_block_t* block;
ulint used;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
ut_ad(size <= DYN_ARRAY_DATA_SIZE);
ut_ad(size);
@@ -159,7 +155,6 @@ dyn_array_open(
{
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
ut_ad(size <= DYN_ARRAY_DATA_SIZE);
ut_ad(size);
@@ -195,7 +190,6 @@ dyn_array_close(
{
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
block = dyn_array_get_last_block(arr);
@@ -222,7 +216,6 @@ dyn_array_get_element(
{
const dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
/* Get the first array block */
@@ -260,7 +253,6 @@ dyn_array_get_data_size(
const dyn_block_t* block;
ulint sum = 0;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
if (arr->heap == NULL) {
diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h
index 2581384ff22..3023b29b793 100644
--- a/storage/xtradb/include/fil0fil.h
+++ b/storage/xtradb/include/fil0fil.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -199,7 +200,9 @@ struct fil_node_t {
ibool open; /*!< TRUE if file open */
os_file_t handle; /*!< OS handle to the file, if file open */
os_event_t sync_event;/*!< Condition event to group and
- serialize calls to fsync */
+ serialize calls to fsync;
+ os_event_set() and os_event_reset()
+ are protected by fil_system_t::mutex */
ibool is_raw_disk;/*!< TRUE if the 'file' is actually a raw
device or a raw disk partition */
ulint size; /*!< size of the file in database pages, 0 if
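Several hunks in this merge, starting with fil_node_t::sync_event above, extend the os_event_t comments with the same locking rule: os_event_set() and os_event_reset() are only called while the owning mutex (here fil_system_t::mutex) is held, while waiters may block without it. A simplified standalone analogue of that protocol; std::mutex and std::condition_variable stand in for InnoDB's os_event, this is not the real os0sync implementation:

	#include <condition_variable>
	#include <mutex>

	struct event_t {
		std::mutex		m;	// plays the role of fil_system_t::mutex
		std::condition_variable	cv;
		bool			is_set = false;

		void set()	{ std::lock_guard<std::mutex> g(m); is_set = true;  cv.notify_all(); }
		void reset()	{ std::lock_guard<std::mutex> g(m); is_set = false; }
		// A waiter may call this at any time; it only observes the flag.
		void wait()	{ std::unique_lock<std::mutex> l(m); cv.wait(l, [this] { return is_set; }); }
	};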
diff --git a/storage/xtradb/include/fsp0fsp.h b/storage/xtradb/include/fsp0fsp.h
index 099cb8edc14..0097537e4d6 100644
--- a/storage/xtradb/include/fsp0fsp.h
+++ b/storage/xtradb/include/fsp0fsp.h
@@ -61,7 +61,7 @@ is found in a remote location, not the default data directory. */
+ FSP_FLAGS_WIDTH_DATA_DIR)
/** A mask of all the known/used bits in tablespace flags */
-#define FSP_FLAGS_MASK (~(~0 << FSP_FLAGS_WIDTH))
+#define FSP_FLAGS_MASK (~(~0U << FSP_FLAGS_WIDTH))
/** Zero relative shift position of the POST_ANTELOPE field */
#define FSP_FLAGS_POS_POST_ANTELOPE 0
diff --git a/storage/xtradb/include/fsp0types.h b/storage/xtradb/include/fsp0types.h
index 94fd908ab0c..9734f4c8abc 100644
--- a/storage/xtradb/include/fsp0types.h
+++ b/storage/xtradb/include/fsp0types.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/xtradb/include/fts0fts.h b/storage/xtradb/include/fts0fts.h
index 3e2f359bbeb..7aa7055640c 100644
--- a/storage/xtradb/include/fts0fts.h
+++ b/storage/xtradb/include/fts0fts.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2016, MariaDB Corporation. All Rights reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -366,8 +367,8 @@ extern ulong fts_min_token_size;
need a sync to free some memory */
extern bool fts_need_sync;
-/** Maximum possible Fulltext word length */
-#define FTS_MAX_WORD_LEN HA_FT_MAXBYTELEN
+/** Maximum possible Fulltext word length in bytes (assuming mbmaxlen=4) */
+#define FTS_MAX_WORD_LEN (HA_FT_MAXCHARLEN * 4)
/** Maximum possible Fulltext word length (in characters) */
#define FTS_MAX_WORD_LEN_IN_CHAR HA_FT_MAXCHARLEN
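The FTS_MAX_WORD_LEN hunk above redefines the limit in bytes as HA_FT_MAXCHARLEN * 4, i.e. the character limit times the widest multi-byte sequence it has to accommodate (4 bytes per character, as for utf8mb4). A standalone sketch of that sizing rule; the numeric values are illustrative, not the real HA_FT_MAXCHARLEN:

	#include <stdio.h>

	enum { MAX_WORD_CHARS = 84, MBMAXLEN = 4 };		/* illustrative values */
	enum { MAX_WORD_BYTES = MAX_WORD_CHARS * MBMAXLEN };	/* buffer size in bytes */

	int main(void)
	{
		char word[MAX_WORD_BYTES + 1];	/* +1 for the terminating NUL */
		printf("up to %d characters need up to %d bytes\n",
		       MAX_WORD_CHARS, (int) (sizeof word - 1));
		return 0;
	}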
diff --git a/storage/xtradb/include/fts0types.h b/storage/xtradb/include/fts0types.h
index e495fe72a60..0dad75d8f1b 100644
--- a/storage/xtradb/include/fts0types.h
+++ b/storage/xtradb/include/fts0types.h
@@ -126,7 +126,9 @@ struct fts_sync_t {
bool in_progress; /*!< flag whether sync is in progress.*/
bool unlock_cache; /*!< flag whether unlock cache when
write fts node */
- os_event_t event; /*!< sync finish event */
+ os_event_t event; /*!< sync finish event;
+ only os_event_set() and os_event_wait()
+ are used */
};
/** The cache for the FTS system. It is a memory-based inverted index
diff --git a/storage/xtradb/include/lock0lock.h b/storage/xtradb/include/lock0lock.h
index 45955f3a7ea..698780a9a9e 100644
--- a/storage/xtradb/include/lock0lock.h
+++ b/storage/xtradb/include/lock0lock.h
@@ -943,7 +943,12 @@ struct lock_sys_t{
srv_slot_t* waiting_threads; /*!< Array of user threads
suspended while waiting for
locks within InnoDB, protected
- by the lock_sys->wait_mutex */
+ by the lock_sys->wait_mutex;
+ os_event_set() and
+ os_event_reset() on
+ waiting_threads[]->event
+ are protected by
+ trx_t::mutex */
srv_slot_t* last_slot; /*!< highest slot ever used
in the waiting_threads array,
protected by
@@ -956,10 +961,11 @@ struct lock_sys_t{
ulint n_lock_max_wait_time; /*!< Max wait time */
- os_event_t timeout_event; /*!< Set to the event that is
- created in the lock wait monitor
- thread. A value of 0 means the
- thread is not active */
+ os_event_t timeout_event; /*!< An event waited for by
+ lock_wait_timeout_thread.
+ Not protected by a mutex,
+ but the waits are timed.
+ Signaled on shutdown only. */
bool timeout_thread_active; /*!< True if the timeout thread
is running */
diff --git a/storage/xtradb/include/log0log.h b/storage/xtradb/include/log0log.h
index f716e534b01..259c4301de9 100644
--- a/storage/xtradb/include/log0log.h
+++ b/storage/xtradb/include/log0log.h
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2009, Google Inc.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -924,10 +925,8 @@ struct log_t{
be 'flush_or_write'! */
os_event_t no_flush_event; /*!< this event is in the reset state
when a flush or a write is running;
- a thread should wait for this without
- owning the log mutex, but NOTE that
- to set or reset this event, the
- thread MUST own the log mutex! */
+ os_event_set() and os_event_reset()
+ are protected by log_sys_t::mutex */
ibool one_flushed; /*!< during a flush, this is
first FALSE and becomes TRUE
when one log group has been
@@ -936,11 +935,9 @@ struct log_t{
flush or write has not yet completed
for any log group; e.g., this means
that a transaction has been committed
- when this is set; a thread should wait
- for this without owning the log mutex,
- but NOTE that to set or reset this
- event, the thread MUST own the log
- mutex! */
+ when this is set;
+ os_event_set() and os_event_reset()
+ are protected by log_sys_t::mutex */
ulint n_log_ios; /*!< number of log i/os initiated thus
far */
ulint n_log_ios_old; /*!< number of log i/o's at the
@@ -1026,9 +1023,9 @@ struct log_t{
byte* archive_buf_ptr;/*!< unaligned archived_buf */
byte* archive_buf; /*!< log segment is written to the
archive from this buffer */
- os_event_t archiving_on; /*!< if archiving has been stopped,
- a thread can wait for this event to
- become signaled */
+ os_event_t archiving_on; /*!< if archiving has been stopped;
+ os_event_set() and os_event_reset()
+ are protected by log_sys_t::mutex */
/* @} */
#endif /* UNIV_LOG_ARCHIVE */
lsn_t tracked_lsn; /*!< log tracking has advanced to this
diff --git a/storage/xtradb/include/log0online.h b/storage/xtradb/include/log0online.h
index 5706f3af4b0..722336dd6b4 100644
--- a/storage/xtradb/include/log0online.h
+++ b/storage/xtradb/include/log0online.h
@@ -38,19 +38,25 @@ log_online_bitmap_file_range_t;
/** An iterator over changed page info */
typedef struct log_bitmap_iterator_struct log_bitmap_iterator_t;
-/*********************************************************************//**
-Initializes the online log following subsytem. */
+/** Initialize the constant part of the log tracking subsystem */
+UNIV_INTERN
+void
+log_online_init(void);
+
+/** Initialize the dynamic part of the log tracking subsystem */
UNIV_INTERN
void
log_online_read_init(void);
-/*=======================*/
-/*********************************************************************//**
-Shuts down the online log following subsystem. */
+/** Shut down the dynamic part of the log tracking subsystem */
UNIV_INTERN
void
log_online_read_shutdown(void);
-/*===========================*/
+
+/** Shut down the constant part of the log tracking subsystem */
+UNIV_INTERN
+void
+log_online_shutdown(void);
/*********************************************************************//**
Reads and parses the redo log up to last checkpoint LSN to build the changed
@@ -147,6 +153,8 @@ struct log_online_bitmap_file_range_struct {
/** Struct for an iterator through all bits of changed pages bitmap blocks */
struct log_bitmap_iterator_struct
{
+ lsn_t max_lsn; /*!< End LSN of the
+ range */
ibool failed; /*!< Has the iteration
stopped prematurely */
log_online_bitmap_file_range_t in_files; /*!< The bitmap files
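The log0online.h hunks above split the subsystem lifecycle in two: log_online_init()/log_online_shutdown() manage the constant part (a mutex that must outlive everything else), while log_online_read_init()/log_online_read_shutdown() manage the dynamic state that can come and go. A standalone sketch of that split, with illustrative names rather than the real functions:

	#include <mutex>

	struct dyn_state_t { /* buffers, file handles, ... */ };

	static std::mutex	sys_mutex;	// constant part: lives for the whole process
	static dyn_state_t*	sys = nullptr;	// dynamic part: may be created/destroyed repeatedly

	void subsystem_init()		{ /* constant part needs no setup beyond construction */ }
	void dynamic_init()		{ std::lock_guard<std::mutex> g(sys_mutex); sys = new dyn_state_t(); }
	void dynamic_shutdown()		{ std::lock_guard<std::mutex> g(sys_mutex); delete sys; sys = nullptr; }
	void subsystem_shutdown()	{ /* last step: nothing left that can touch sys_mutex */ }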
diff --git a/storage/xtradb/include/mach0data.h b/storage/xtradb/include/mach0data.h
index 9859def0adc..2e16634a6c2 100644
--- a/storage/xtradb/include/mach0data.h
+++ b/storage/xtradb/include/mach0data.h
@@ -53,7 +53,7 @@ ulint
mach_read_from_1(
/*=============*/
const byte* b) /*!< in: pointer to byte */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in two consecutive
bytes. We store the most significant byte to the lower address. */
@@ -114,7 +114,7 @@ ulint
mach_read_from_3(
/*=============*/
const byte* b) /*!< in: pointer to 3 bytes */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in four consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -133,7 +133,7 @@ ulint
mach_read_from_4(
/*=============*/
const byte* b) /*!< in: pointer to four bytes */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************//**
Writes a ulint in a compressed form (1..5 bytes).
@return stored size in bytes */
@@ -160,7 +160,7 @@ ulint
mach_read_compressed(
/*=================*/
const byte* b) /*!< in: pointer to memory from where to read */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in 6 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -179,7 +179,7 @@ ib_uint64_t
mach_read_from_6(
/*=============*/
const byte* b) /*!< in: pointer to 6 bytes */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in 7 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -198,7 +198,7 @@ ib_uint64_t
mach_read_from_7(
/*=============*/
const byte* b) /*!< in: pointer to 7 bytes */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in 8 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -243,7 +243,7 @@ ib_uint64_t
mach_ull_read_compressed(
/*=====================*/
const byte* b) /*!< in: pointer to memory from where to read */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************//**
Writes a 64-bit integer in a compressed form (1..11 bytes).
@return size in bytes */
@@ -270,7 +270,7 @@ ib_uint64_t
mach_ull_read_much_compressed(
/*==========================*/
const byte* b) /*!< in: pointer to memory from where to read */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************//**
Reads a ulint in a compressed form if the log record fully contains it.
@return pointer to end of the stored field, NULL if not complete */
diff --git a/storage/xtradb/include/mach0data.ic b/storage/xtradb/include/mach0data.ic
index 27b9f62b552..4b9275cc12c 100644
--- a/storage/xtradb/include/mach0data.ic
+++ b/storage/xtradb/include/mach0data.ic
@@ -52,7 +52,6 @@ mach_read_from_1(
/*=============*/
const byte* b) /*!< in: pointer to byte */
{
- ut_ad(b);
return((ulint)(b[0]));
}
@@ -132,7 +131,6 @@ mach_read_from_3(
/*=============*/
const byte* b) /*!< in: pointer to 3 bytes */
{
- ut_ad(b);
return( ((ulint)(b[0]) << 16)
| ((ulint)(b[1]) << 8)
| (ulint)(b[2])
@@ -182,7 +180,6 @@ mach_read_from_4(
/*=============*/
const byte* b) /*!< in: pointer to four bytes */
{
- ut_ad(b);
return( ((ulint)(b[0]) << 24)
| ((ulint)(b[1]) << 16)
| ((ulint)(b[2]) << 8)
@@ -261,8 +258,6 @@ mach_read_compressed(
{
ulint flag;
- ut_ad(b);
-
flag = mach_read_from_1(b);
if (flag < 0x80UL) {
@@ -339,8 +334,6 @@ mach_read_from_7(
/*=============*/
const byte* b) /*!< in: pointer to 7 bytes */
{
- ut_ad(b);
-
return(ut_ull_create(mach_read_from_3(b), mach_read_from_4(b + 3)));
}
@@ -370,8 +363,6 @@ mach_read_from_6(
/*=============*/
const byte* b) /*!< in: pointer to 6 bytes */
{
- ut_ad(b);
-
return(ut_ull_create(mach_read_from_2(b), mach_read_from_4(b + 2)));
}
@@ -419,8 +410,6 @@ mach_ull_read_compressed(
ib_uint64_t n;
ulint size;
- ut_ad(b);
-
n = (ib_uint64_t) mach_read_compressed(b);
size = mach_get_compressed_size((ulint) n);
@@ -486,8 +475,6 @@ mach_ull_read_much_compressed(
ib_uint64_t n;
ulint size;
- ut_ad(b);
-
if (*b != (byte)0xFF) {
n = 0;
size = 0;
diff --git a/storage/xtradb/include/mtr0mtr.h b/storage/xtradb/include/mtr0mtr.h
index c00472117dc..f939918a90d 100644
--- a/storage/xtradb/include/mtr0mtr.h
+++ b/storage/xtradb/include/mtr0mtr.h
@@ -227,8 +227,7 @@ UNIV_INTERN
void
mtr_commit(
/*=======*/
- mtr_t* mtr) /*!< in/out: mini-transaction */
- MY_ATTRIBUTE((nonnull));
+ mtr_t* mtr); /*!< in/out: mini-transaction */
/**********************************************************//**
Sets and returns a savepoint in mtr.
@return savepoint */
@@ -334,7 +333,7 @@ mtr_memo_contains(
mtr_t* mtr, /*!< in: mtr */
const void* object, /*!< in: object to search */
ulint type) /*!< in: type of object */
- MY_ATTRIBUTE((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result));
/**********************************************************//**
Checks if memo contains the given page.
diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h
index d2d2be7662f..60a9c3475e7 100644
--- a/storage/xtradb/include/os0file.h
+++ b/storage/xtradb/include/os0file.h
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted
by Percona Inc.. Those modifications are
@@ -552,9 +553,10 @@ os_file_create_simple_no_error_handling_func(
ibool* success)/*!< out: TRUE if succeed, FALSE if error */
MY_ATTRIBUTE((nonnull, warn_unused_result));
/****************************************************************//**
-Tries to disable OS caching on an opened file descriptor. */
+Tries to disable OS caching on an opened file descriptor.
+@return true if the operation succeeds and false otherwise */
UNIV_INTERN
-void
+bool
os_file_set_nocache(
/*================*/
os_file_t fd, /*!< in: file descriptor to alter */
@@ -1175,6 +1177,7 @@ UNIV_INTERN
void
os_aio_simulated_wake_handler_threads(void);
/*=======================================*/
+#ifdef _WIN32
/**********************************************************************//**
This function can be called if one wants to post a batch of reads and
prefers an i/o-handler thread to handle them all at once later. You must
@@ -1182,8 +1185,10 @@ call os_aio_simulated_wake_handler_threads later to ensure the threads
are not left sleeping! */
UNIV_INTERN
void
-os_aio_simulated_put_read_threads_to_sleep(void);
-/*============================================*/
+os_aio_simulated_put_read_threads_to_sleep();
+#else /* _WIN32 */
+# define os_aio_simulated_put_read_threads_to_sleep()
+#endif /* _WIN32 */
#ifdef WIN_ASYNC_IO
/**********************************************************************//**
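The os0file.h hunk above makes os_aio_simulated_put_read_threads_to_sleep() Windows-only: elsewhere it is compiled away to an empty macro so that existing call sites need no #ifdef of their own. The same pattern in isolation, with illustrative names:

	#ifdef _WIN32
	void put_read_threads_to_sleep();	/* real declaration, defined elsewhere */
	#else
	# define put_read_threads_to_sleep()	/* expands to nothing off Windows */
	#endif

	static void submit_read_batch()
	{
		/* ... queue the reads ... */
		put_read_threads_to_sleep();	/* no-op on non-Windows builds */
	}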
diff --git a/storage/xtradb/include/os0thread.h b/storage/xtradb/include/os0thread.h
index 671b9b7dc3f..7865358b0f7 100644
--- a/storage/xtradb/include/os0thread.h
+++ b/storage/xtradb/include/os0thread.h
@@ -131,11 +131,9 @@ os_thread_create_func(
os_thread_id_t* thread_id); /*!< out: id of the created
thread, or NULL */
-/**
-Waits until the specified thread completes and joins it. Its return value is
-ignored.
-
-@param thread thread to join */
+/** Waits until the specified thread completes and joins it.
+Its return value is ignored.
+@param[in,out] thread thread to join */
UNIV_INTERN
void
os_thread_join(
diff --git a/storage/xtradb/include/page0page.h b/storage/xtradb/include/page0page.h
index cb43c937757..eefa0fa4c5b 100644
--- a/storage/xtradb/include/page0page.h
+++ b/storage/xtradb/include/page0page.h
@@ -235,8 +235,7 @@ ulint
page_header_get_offs(
/*=================*/
const page_t* page, /*!< in: page */
- ulint field) /*!< in: PAGE_FREE, ... */
- MY_ATTRIBUTE((nonnull, pure));
+ ulint field); /*!< in: PAGE_FREE, ... */
/*************************************************************//**
Returns the pointer stored in the given header field, or NULL. */
@@ -528,7 +527,7 @@ bool
page_is_leaf(
/*=========*/
const page_t* page) /*!< in: page */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
Determine whether the page is empty.
@return true if the page is empty (PAGE_N_RECS = 0) */
@@ -849,8 +848,7 @@ page_copy_rec_list_end(
buf_block_t* block, /*!< in: index page containing rec */
rec_t* rec, /*!< in: record on page */
dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr */
- MY_ATTRIBUTE((nonnull));
+ mtr_t* mtr); /*!< in: mtr */
/*************************************************************//**
Copies records from page to new_page, up to the given record, NOT
including that record. Infimum and supremum records are not copied.
@@ -871,8 +869,7 @@ page_copy_rec_list_start(
buf_block_t* block, /*!< in: index page containing rec */
rec_t* rec, /*!< in: record on page */
dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr */
- MY_ATTRIBUTE((nonnull));
+ mtr_t* mtr); /*!< in: mtr */
/*************************************************************//**
Deletes records from a page from a given record onward, including that record.
The infimum and supremum records are not deleted. */
@@ -921,8 +918,7 @@ page_move_rec_list_end(
buf_block_t* block, /*!< in: index page from where to move */
rec_t* split_rec, /*!< in: first record to move */
dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr */
- MY_ATTRIBUTE((nonnull(1, 2, 4, 5)));
+ mtr_t* mtr); /*!< in: mtr */
/*************************************************************//**
Moves record list start to another page. Moved records do not include
split_rec.
@@ -952,8 +948,7 @@ page_dir_split_slot(
page_t* page, /*!< in: index page */
page_zip_des_t* page_zip,/*!< in/out: compressed page whose
uncompressed part will be written, or NULL */
- ulint slot_no)/*!< in: the directory slot */
- MY_ATTRIBUTE((nonnull(1)));
+ ulint slot_no);/*!< in: the directory slot */
/*************************************************************//**
Tries to balance the given directory slot with too few records
with the upper neighbor, so that there are at least the minimum number
@@ -965,8 +960,7 @@ page_dir_balance_slot(
/*==================*/
page_t* page, /*!< in/out: index page */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
- ulint slot_no)/*!< in: the directory slot */
- MY_ATTRIBUTE((nonnull(1)));
+ ulint slot_no);/*!< in: the directory slot */
/**********************************************************//**
Parses a log record of a record list end or start deletion.
@return end of log record or NULL */
diff --git a/storage/xtradb/include/page0page.ic b/storage/xtradb/include/page0page.ic
index 4a22a32112f..80939bbb828 100644
--- a/storage/xtradb/include/page0page.ic
+++ b/storage/xtradb/include/page0page.ic
@@ -154,7 +154,6 @@ page_header_get_offs(
{
ulint offs;
- ut_ad(page);
ut_ad((field == PAGE_FREE)
|| (field == PAGE_LAST_INSERT)
|| (field == PAGE_HEAP_TOP));
diff --git a/storage/xtradb/include/page0zip.h b/storage/xtradb/include/page0zip.h
index 81068e7bd29..adafaa6d8b6 100644
--- a/storage/xtradb/include/page0zip.h
+++ b/storage/xtradb/include/page0zip.h
@@ -132,7 +132,7 @@ page_zip_compress(
dict_index_t* index, /*!< in: index of the B-tree node */
ulint level, /*!< in: compression level */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
- MY_ATTRIBUTE((nonnull(1,2,3)));
+ MY_ATTRIBUTE((warn_unused_result));
/**********************************************************************//**
Decompress a page. This function should tolerate errors on the compressed
@@ -424,8 +424,7 @@ page_zip_reorganize(
out: data, n_blobs,
m_start, m_end, m_nonempty */
dict_index_t* index, /*!< in: index of the B-tree node */
- mtr_t* mtr) /*!< in: mini-transaction */
- MY_ATTRIBUTE((nonnull));
+ mtr_t* mtr); /*!< in: mini-transaction */
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Copy the records of a page byte for byte. Do not copy the page header
@@ -458,7 +457,7 @@ page_zip_parse_compress(
byte* end_ptr,/*!< in: buffer end */
page_t* page, /*!< out: uncompressed page */
page_zip_des_t* page_zip)/*!< out: compressed page */
- MY_ATTRIBUTE((nonnull(1,2)));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/xtradb/include/rem0rec.h b/storage/xtradb/include/rem0rec.h
index a6d47e80049..3d147e3d5fb 100644
--- a/storage/xtradb/include/rem0rec.h
+++ b/storage/xtradb/include/rem0rec.h
@@ -747,8 +747,7 @@ rec_copy(
/*=====*/
void* buf, /*!< in: buffer */
const rec_t* rec, /*!< in: physical record */
- const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- MY_ATTRIBUTE((nonnull));
+ const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
#ifndef UNIV_HOTBACKUP
/**********************************************************//**
Determines the size of a data tuple prefix in a temporary file.
diff --git a/storage/xtradb/include/row0upd.h b/storage/xtradb/include/row0upd.h
index e59ec58b63c..4312fcf7339 100644
--- a/storage/xtradb/include/row0upd.h
+++ b/storage/xtradb/include/row0upd.h
@@ -248,9 +248,8 @@ row_upd_index_replace_new_col_vals_index_pos(
/*!< in: if TRUE, limit the replacement to
ordering fields of index; note that this
does not work for non-clustered indexes. */
- mem_heap_t* heap) /*!< in: memory heap for allocating and
+ mem_heap_t* heap); /*!< in: memory heap for allocating and
copying the new values */
- MY_ATTRIBUTE((nonnull));
/***********************************************************//**
Replaces the new column values stored in the update vector to the index entry
given. */
@@ -311,7 +310,7 @@ row_upd_changes_ord_field_binary_func(
compile time */
const row_ext_t*ext) /*!< NULL, or prefixes of the externally
stored columns in the old row */
- MY_ATTRIBUTE((nonnull(1,2), warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#ifdef UNIV_DEBUG
# define row_upd_changes_ord_field_binary(index,update,thr,row,ext) \
row_upd_changes_ord_field_binary_func(index,update,thr,row,ext)
diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h
index 8ea92f69368..53098d7e426 100644
--- a/storage/xtradb/include/srv0srv.h
+++ b/storage/xtradb/include/srv0srv.h
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, 2009, Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation Ab. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -152,13 +152,16 @@ extern const char* srv_main_thread_op_info;
/** Prefix used by MySQL to indicate pre-5.1 table name encoding */
extern const char srv_mysql50_table_name_prefix[10];
-/* The monitor thread waits on this event. */
+/** Event to signal srv_monitor_thread. Not protected by a mutex.
+Set after setting srv_print_innodb_monitor. */
extern os_event_t srv_monitor_event;
-/* The error monitor thread waits on this event. */
+/** Event to signal the shutdown of srv_error_monitor_thread.
+Not protected by a mutex. */
extern os_event_t srv_error_event;
-/** The buffer pool dump/load thread waits on this event. */
+/** Event for waking up buf_dump_thread. Not protected by a mutex.
+Set on shutdown or by buf_dump_start() or buf_load_start(). */
extern os_event_t srv_buf_dump_event;
/** The buffer pool dump/load file name */
@@ -423,9 +426,6 @@ extern double srv_adaptive_flushing_lwm;
extern ulong srv_flushing_avg_loops;
extern ulong srv_force_recovery;
-#ifndef DBUG_OFF
-extern ulong srv_force_recovery_crash;
-#endif /* !DBUG_OFF */
extern ulint srv_fast_shutdown; /*!< If this is 1, do not do a
purge and index buffer merge.
@@ -440,6 +440,7 @@ extern unsigned long long srv_stats_transient_sample_pages;
extern my_bool srv_stats_persistent;
extern unsigned long long srv_stats_persistent_sample_pages;
extern my_bool srv_stats_auto_recalc;
+extern my_bool srv_stats_include_delete_marked;
extern unsigned long long srv_stats_modified_counter;
extern my_bool srv_stats_sample_traditional;
@@ -972,17 +973,12 @@ ulint
srv_get_task_queue_length(void);
/*===========================*/
-/*********************************************************************//**
-Releases threads of the type given from suspension in the thread table.
-NOTE! The server mutex has to be reserved by the caller!
-@return number of threads released: this may be less than n if not
-enough threads were suspended at the moment */
-UNIV_INTERN
-ulint
-srv_release_threads(
-/*================*/
- enum srv_thread_type type, /*!< in: thread type */
- ulint n); /*!< in: number of threads to release */
+/** Ensure that a given number of threads of the type given are running
+(or are already terminated).
+@param[in] type thread type
+@param[in] n number of threads that have to run */
+void
+srv_release_threads(enum srv_thread_type type, ulint n);
/**********************************************************************//**
Check whether any background thread are active. If so print which thread
@@ -993,12 +989,10 @@ const char*
srv_any_background_threads_are_active(void);
/*=======================================*/
-/**********************************************************************//**
-Wakeup the purge threads. */
+/** Wake up the purge threads. */
UNIV_INTERN
void
-srv_purge_wakeup(void);
-/*==================*/
+srv_purge_wakeup();
/** Status variables to be passed to MySQL */
struct export_var_t{
@@ -1166,4 +1160,12 @@ wsrep_srv_conc_cancel_wait(
thread */
#endif /* WITH_WSREP */
+#ifndef DBUG_OFF
+/** false before InnoDB monitor has been printed at least once, true
+afterwards */
+extern bool srv_debug_monitor_printed;
+#else
+#define srv_debug_monitor_printed false
+#endif
+
#endif
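The srv0srv.h hunk above adds srv_debug_monitor_printed as a real variable in debug builds and as the constant false otherwise, so release builds compile the guarded checks away. The same idiom as a standalone sketch; NDEBUG stands in for the server's DBUG_OFF and the names are illustrative:

	#include <cstdio>

	#ifndef NDEBUG
	static bool monitor_printed = false;	/* real flag in debug builds */
	#else
	# define monitor_printed false		/* constant in release builds */
	#endif

	int main()
	{
		if (monitor_printed)		/* dead code when NDEBUG is set */
			std::puts("monitor output already printed");
	#ifndef NDEBUG
		monitor_printed = true;
	#endif
		return 0;
	}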
diff --git a/storage/xtradb/include/trx0purge.h b/storage/xtradb/include/trx0purge.h
index a862523c092..7b9b5dc49cd 100644
--- a/storage/xtradb/include/trx0purge.h
+++ b/storage/xtradb/include/trx0purge.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -145,7 +146,10 @@ struct trx_purge_t{
log operation can prevent this by
obtaining an s-latch here. It also
protects state and running */
- os_event_t event; /*!< State signal event */
+ os_event_t event; /*!< State signal event;
+ os_event_set() and os_event_reset()
+ are protected by trx_purge_t::latch
+ X-lock */
ulint n_stop; /*!< Counter to track number stops */
volatile bool running; /*!< true, if purge is active,
we check this without the latch too */
diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h
index ec810405b5a..f9917c8448d 100644
--- a/storage/xtradb/include/trx0trx.h
+++ b/storage/xtradb/include/trx0trx.h
@@ -106,7 +106,7 @@ void
trx_free_prepared(
/*==============*/
trx_t* trx) /*!< in, own: trx object */
- UNIV_COLD MY_ATTRIBUTE((nonnull));
+ UNIV_COLD;
/********************************************************************//**
Frees a transaction object for MySQL. */
UNIV_INTERN
diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i
index 3444cbb1e7f..6acb0eec319 100644
--- a/storage/xtradb/include/univ.i
+++ b/storage/xtradb/include/univ.i
@@ -45,10 +45,10 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 34
+#define INNODB_VERSION_BUGFIX 35
#ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 79.1
+#define PERCONA_INNODB_VERSION 80.0
#endif
/* Enable UNIV_LOG_ARCHIVE in XtraDB */
diff --git a/storage/xtradb/include/ut0wqueue.h b/storage/xtradb/include/ut0wqueue.h
index 33385ddf2d4..09b2eb717a7 100644
--- a/storage/xtradb/include/ut0wqueue.h
+++ b/storage/xtradb/include/ut0wqueue.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2006, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -99,7 +100,9 @@ ib_wqueue_timedwait(
struct ib_wqueue_t {
ib_mutex_t mutex; /*!< mutex protecting everything */
ib_list_t* items; /*!< work item list */
- os_event_t event; /*!< event we use to signal additions to list */
+ os_event_t event; /*!< event we use to signal additions to list;
+ os_event_set() and os_event_reset() are
+ protected by ib_wqueue_t::mutex */
};
#endif
diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc
index 15183b9c1cb..b39a8ed1829 100644
--- a/storage/xtradb/log/log0log.cc
+++ b/storage/xtradb/log/log0log.cc
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Google Inc.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -1569,17 +1570,7 @@ log_write_up_to(
}
loop:
-#ifdef UNIV_DEBUG
- loop_count++;
-
- ut_ad(loop_count < 5);
-
-# if 0
- if (loop_count > 2) {
- fprintf(stderr, "Log loop count %lu\n", loop_count);
- }
-# endif
-#endif
+ ut_ad(++loop_count < 100);
mutex_enter(&(log_sys->mutex));
ut_ad(!recv_no_log_write);
@@ -3505,7 +3496,6 @@ logs_empty_and_mark_files_at_shutdown(void)
lsn_t lsn;
lsn_t tracked_lsn;
ulint count = 0;
- ulint total_trx;
ulint pending_io;
enum srv_thread_type active_thd;
const char* thread_name;
@@ -3556,10 +3546,9 @@ loop:
shutdown, because the InnoDB layer may have committed or
prepared transactions and we don't want to lose them. */
- total_trx = trx_sys_any_active_transactions();
-
- if (total_trx > 0) {
-
+ if (ulint total_trx = srv_was_started && !srv_read_only_mode
+ && srv_force_recovery < SRV_FORCE_NO_TRX_UNDO
+ ? trx_sys_any_active_transactions() : 0) {
if (srv_print_verbose_log && count > 600) {
ib_logf(IB_LOG_LEVEL_INFO,
"Waiting for %lu active transactions to finish",
diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc
index 209ca743628..764c9e0c52f 100644
--- a/storage/xtradb/log/log0online.cc
+++ b/storage/xtradb/log/log0online.cc
@@ -76,12 +76,14 @@ struct log_bitmap_struct {
both the correct type and the tree does
not mind its overwrite during
rbt_next() tree traversal. */
- ib_mutex_t mutex; /*!< mutex protecting all the fields.*/
};
/* The log parsing and bitmap output struct instance */
static struct log_bitmap_struct* log_bmp_sys;
+/* Mutex protecting log_bmp_sys */
+static ib_mutex_t log_bmp_sys_mutex;
+
/** File name stem for bitmap files. */
static const char* bmp_file_name_stem = "ib_modified_log_";
@@ -173,28 +175,24 @@ log_online_set_page_bit(
ulint space, /*!<in: log record space id */
ulint page_no)/*!<in: log record page id */
{
- ulint block_start_page;
- ulint block_pos;
- uint bit_pos;
- ib_rbt_bound_t tree_search_pos;
- byte search_page[MODIFIED_PAGE_BLOCK_SIZE];
- byte *page_ptr;
-
- ut_ad(mutex_own(&log_bmp_sys->mutex));
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
ut_a(space != ULINT_UNDEFINED);
ut_a(page_no != ULINT_UNDEFINED);
- block_start_page = page_no / MODIFIED_PAGE_BLOCK_ID_COUNT
+ ulint block_start_page = page_no / MODIFIED_PAGE_BLOCK_ID_COUNT
* MODIFIED_PAGE_BLOCK_ID_COUNT;
- block_pos = block_start_page ? (page_no % block_start_page / 8)
+ ulint block_pos = block_start_page ? (page_no % block_start_page / 8)
: (page_no / 8);
- bit_pos = page_no % 8;
+ uint bit_pos = page_no % 8;
+ byte search_page[MODIFIED_PAGE_BLOCK_SIZE];
mach_write_to_4(search_page + MODIFIED_PAGE_SPACE_ID, space);
mach_write_to_4(search_page + MODIFIED_PAGE_1ST_PAGE_ID,
block_start_page);
+ byte *page_ptr;
+ ib_rbt_bound_t tree_search_pos;
if (!rbt_search(log_bmp_sys->modified_pages, &tree_search_pos,
search_page)) {
page_ptr = rbt_value(byte, tree_search_pos.last);
@@ -596,12 +594,19 @@ log_online_is_bitmap_file(
&& (!strcmp(stem, bmp_file_name_stem)));
}
-/*********************************************************************//**
-Initialize the online log following subsytem. */
+/** Initialize the constant part of the log tracking subsystem */
+UNIV_INTERN
+void
+log_online_init(void)
+{
+ mutex_create(log_bmp_sys_mutex_key, &log_bmp_sys_mutex,
+ SYNC_LOG_ONLINE);
+}
+
+/** Initialize the dynamic part of the log tracking subsystem */
UNIV_INTERN
void
log_online_read_init(void)
-/*======================*/
{
ibool success;
lsn_t tracking_start_lsn
@@ -625,9 +630,6 @@ log_online_read_init(void)
log_bmp_sys->read_buf = static_cast<byte *>
(ut_align(log_bmp_sys->read_buf_ptr, OS_FILE_LOG_BLOCK_SIZE));
- mutex_create(log_bmp_sys_mutex_key, &log_bmp_sys->mutex,
- SYNC_LOG_ONLINE);
-
/* Initialize bitmap file directory from srv_data_home and add a path
separator if needed. */
srv_data_home_len = strlen(srv_data_home);
@@ -767,13 +769,15 @@ log_online_read_init(void)
log_set_tracked_lsn(tracking_start_lsn);
}
-/*********************************************************************//**
-Shut down the online log following subsystem. */
+/** Shut down the dynamic part of the log tracking subsystem */
UNIV_INTERN
void
log_online_read_shutdown(void)
-/*==========================*/
{
+ mutex_enter(&log_bmp_sys_mutex);
+
+ srv_track_changed_pages = FALSE;
+
ib_rbt_node_t *free_list_node = log_bmp_sys->page_free_list;
if (log_bmp_sys->out.file != os_file_invalid) {
@@ -789,10 +793,21 @@ log_online_read_shutdown(void)
free_list_node = next;
}
- mutex_free(&log_bmp_sys->mutex);
-
ut_free(log_bmp_sys->read_buf_ptr);
ut_free(log_bmp_sys);
+ log_bmp_sys = NULL;
+
+ srv_redo_log_thread_started = false;
+
+ mutex_exit(&log_bmp_sys_mutex);
+}
+
+/** Shut down the constant part of the log tracking subsystem */
+UNIV_INTERN
+void
+log_online_shutdown(void)
+{
+ mutex_free(&log_bmp_sys_mutex);
}
/*********************************************************************//**
@@ -838,13 +853,12 @@ void
log_online_parse_redo_log(void)
/*===========================*/
{
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
+
byte *ptr = log_bmp_sys->parse_buf;
byte *end = log_bmp_sys->parse_buf_end;
-
ulint len = 0;
- ut_ad(mutex_own(&log_bmp_sys->mutex));
-
while (ptr != end
&& log_bmp_sys->next_parse_lsn < log_bmp_sys->end_lsn) {
@@ -926,6 +940,8 @@ log_online_add_to_parse_buf(
ulint skip_len) /*!< in: how much of log data to
skip */
{
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
+
ulint start_offset = skip_len ? skip_len : LOG_BLOCK_HDR_SIZE;
ulint end_offset
= (data_len == OS_FILE_LOG_BLOCK_SIZE)
@@ -934,8 +950,6 @@ log_online_add_to_parse_buf(
ulint actual_data_len = (end_offset >= start_offset)
? end_offset - start_offset : 0;
- ut_ad(mutex_own(&log_bmp_sys->mutex));
-
ut_memcpy(log_bmp_sys->parse_buf_end, log_block + start_offset,
actual_data_len);
@@ -958,11 +972,9 @@ log_online_parse_redo_log_block(
log data should be skipped as
they were parsed before */
{
- ulint block_data_len;
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
- ut_ad(mutex_own(&log_bmp_sys->mutex));
-
- block_data_len = log_block_get_data_len(log_block);
+ ulint block_data_len = log_block_get_data_len(log_block);
ut_ad(block_data_len % OS_FILE_LOG_BLOCK_SIZE == 0
|| block_data_len < OS_FILE_LOG_BLOCK_SIZE);
@@ -982,14 +994,14 @@ log_online_follow_log_seg(
lsn_t block_start_lsn, /*!< in: the LSN to read from */
lsn_t block_end_lsn) /*!< in: the LSN to read to */
{
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
+
/* Pointer to the current OS_FILE_LOG_BLOCK-sized chunk of the read log
data to parse */
byte* log_block = log_bmp_sys->read_buf;
byte* log_block_end = log_bmp_sys->read_buf
+ (block_end_lsn - block_start_lsn);
- ut_ad(mutex_own(&log_bmp_sys->mutex));
-
mutex_enter(&log_sys->mutex);
log_group_read_log_seg(LOG_RECOVER, log_bmp_sys->read_buf,
group, block_start_lsn, block_end_lsn, TRUE);
@@ -1049,11 +1061,11 @@ log_online_follow_log_group(
lsn_t contiguous_lsn) /*!< in: the LSN of log block start
containing the log_parse_start_lsn */
{
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
+
lsn_t block_start_lsn = contiguous_lsn;
lsn_t block_end_lsn;
- ut_ad(mutex_own(&log_bmp_sys->mutex));
-
log_bmp_sys->next_parse_lsn = log_bmp_sys->start_lsn;
log_bmp_sys->parse_buf_end = log_bmp_sys->parse_buf;
@@ -1090,20 +1102,28 @@ log_online_write_bitmap_page(
/*=========================*/
const byte *block) /*!< in: block to write */
{
- ibool success;
-
- ut_ad(srv_track_changed_pages);
- ut_ad(mutex_own(&log_bmp_sys->mutex));
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
/* Simulate a write error */
DBUG_EXECUTE_IF("bitmap_page_write_error",
- ib_logf(IB_LOG_LEVEL_ERROR,
- "simulating bitmap write error in "
- "log_online_write_bitmap_page");
- return FALSE;);
-
- success = os_file_write(log_bmp_sys->out.name, log_bmp_sys->out.file,
- block, log_bmp_sys->out.offset,
+ {
+ ulint space_id
+ = mach_read_from_4(block
+ + MODIFIED_PAGE_SPACE_ID);
+ if (space_id > 0) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "simulating bitmap write "
+ "error in "
+ "log_online_write_bitmap_page "
+ "for space ID %lu",
+ space_id);
+ return FALSE;
+ }
+ });
+
+ ibool success = os_file_write(log_bmp_sys->out.name,
+ log_bmp_sys->out.file, block,
+ log_bmp_sys->out.offset,
MODIFIED_PAGE_BLOCK_SIZE);
if (UNIV_UNLIKELY(!success)) {
@@ -1143,11 +1163,7 @@ ibool
log_online_write_bitmap(void)
/*=========================*/
{
- ib_rbt_node_t *bmp_tree_node;
- const ib_rbt_node_t *last_bmp_tree_node;
- ibool success = TRUE;
-
- ut_ad(mutex_own(&log_bmp_sys->mutex));
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
if (log_bmp_sys->out.offset >= srv_max_bitmap_file_size) {
if (!log_online_rotate_bitmap_file(log_bmp_sys->start_lsn)) {
@@ -1155,9 +1171,12 @@ log_online_write_bitmap(void)
}
}
- bmp_tree_node = (ib_rbt_node_t *)
- rbt_first(log_bmp_sys->modified_pages);
- last_bmp_tree_node = rbt_last(log_bmp_sys->modified_pages);
+ ib_rbt_node_t *bmp_tree_node
+ = (ib_rbt_node_t *)rbt_first(log_bmp_sys->modified_pages);
+ const ib_rbt_node_t * const last_bmp_tree_node
+ = rbt_last(log_bmp_sys->modified_pages);
+
+ ibool success = TRUE;
while (bmp_tree_node) {
@@ -1190,9 +1209,11 @@ log_online_write_bitmap(void)
rbt_next(log_bmp_sys->modified_pages, bmp_tree_node);
DBUG_EXECUTE_IF("bitmap_page_2_write_error",
- ut_ad(bmp_tree_node); /* 2nd page must exist */
- DBUG_SET("+d,bitmap_page_write_error");
- DBUG_SET("-d,bitmap_page_2_write_error"););
+ if (bmp_tree_node)
+ {
+ DBUG_SET("+d,bitmap_page_write_error");
+ DBUG_SET("-d,bitmap_page_2_write_error");
+ });
}
rbt_reset(log_bmp_sys->modified_pages);
@@ -1213,10 +1234,19 @@ log_online_follow_redo_log(void)
log_group_t* group;
ibool result;
- ut_ad(srv_track_changed_pages);
ut_ad(!srv_read_only_mode);
- mutex_enter(&log_bmp_sys->mutex);
+ if (!srv_track_changed_pages)
+ return TRUE;
+
+ DEBUG_SYNC_C("log_online_follow_redo_log");
+
+ mutex_enter(&log_bmp_sys_mutex);
+
+ if (!srv_track_changed_pages) {
+ mutex_exit(&log_bmp_sys_mutex);
+ return TRUE;
+ }
/* Grab the LSN of the last checkpoint, we will parse up to it */
mutex_enter(&(log_sys->mutex));
@@ -1224,7 +1254,7 @@ log_online_follow_redo_log(void)
mutex_exit(&(log_sys->mutex));
if (log_bmp_sys->end_lsn == log_bmp_sys->start_lsn) {
- mutex_exit(&log_bmp_sys->mutex);
+ mutex_exit(&log_bmp_sys_mutex);
return TRUE;
}
@@ -1247,7 +1277,7 @@ log_online_follow_redo_log(void)
log_bmp_sys->start_lsn = log_bmp_sys->end_lsn;
log_set_tracked_lsn(log_bmp_sys->start_lsn);
- mutex_exit(&log_bmp_sys->mutex);
+ mutex_exit(&log_bmp_sys_mutex);
return result;
}
@@ -1594,6 +1624,8 @@ log_online_bitmap_iterator_init(
{
ut_a(i);
+ i->max_lsn = max_lsn;
+
if (UNIV_UNLIKELY(min_lsn > max_lsn)) {
/* Empty range */
@@ -1702,6 +1734,9 @@ log_online_bitmap_iterator_next(
return TRUE;
}
+ if (i->end_lsn >= i->max_lsn && i->last_page_in_run)
+ return FALSE;
+
while (!checksum_ok)
{
while (i->in.size < MODIFIED_PAGE_BLOCK_SIZE
@@ -1797,15 +1832,21 @@ log_online_purge_changed_page_bitmaps(
lsn = LSN_MAX;
}
+ bool log_bmp_sys_inited = false;
if (srv_redo_log_thread_started) {
/* User requests might happen with both enabled and disabled
tracking */
- mutex_enter(&log_bmp_sys->mutex);
+ log_bmp_sys_inited = true;
+ mutex_enter(&log_bmp_sys_mutex);
+ if (!srv_redo_log_thread_started) {
+ log_bmp_sys_inited = false;
+ mutex_exit(&log_bmp_sys_mutex);
+ }
}
if (!log_online_setup_bitmap_file_range(&bitmap_files, 0, LSN_MAX)) {
- if (srv_redo_log_thread_started) {
- mutex_exit(&log_bmp_sys->mutex);
+ if (log_bmp_sys_inited) {
+ mutex_exit(&log_bmp_sys_mutex);
}
return TRUE;
}
@@ -1843,7 +1884,7 @@ log_online_purge_changed_page_bitmaps(
}
}
- if (srv_redo_log_thread_started) {
+ if (log_bmp_sys_inited) {
if (lsn > log_bmp_sys->end_lsn) {
lsn_t new_file_lsn;
if (lsn == LSN_MAX) {
@@ -1859,7 +1900,7 @@ log_online_purge_changed_page_bitmaps(
}
}
- mutex_exit(&log_bmp_sys->mutex);
+ mutex_exit(&log_bmp_sys_mutex);
}
free(bitmap_files.files);
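The log0online.cc changes above move the mutex out of log_bmp_sys into the file-static log_bmp_sys_mutex, so it stays valid after the dynamic state is freed; callers such as log_online_follow_redo_log() therefore test the tracking flag once without the lock and again after acquiring it. A standalone sketch of that check-lock-recheck pattern, with illustrative names and std::mutex instead of ib_mutex_t:

	#include <mutex>

	static std::mutex	sys_mutex;		// outlives the dynamic state
	static bool		tracking = true;	// stands in for srv_track_changed_pages
	static int*		sys = nullptr;		// stands in for log_bmp_sys

	bool follow_log()
	{
		if (!tracking)
			return true;			// fast path: no lock taken

		std::lock_guard<std::mutex> g(sys_mutex);

		if (!tracking)				// re-check under the mutex:
			return true;			// shutdown may have run in between

		// sys is only freed (and tracking cleared) while sys_mutex is held,
		// so it is safe to use here.
		return sys != nullptr;
	}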
diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc
index 9e42fb5cc1c..01975712d99 100644
--- a/storage/xtradb/log/log0recv.cc
+++ b/storage/xtradb/log/log0recv.cc
@@ -2,6 +2,7 @@
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -176,7 +177,7 @@ UNIV_INTERN mysql_pfs_key_t recv_writer_mutex_key;
# endif /* UNIV_PFS_MUTEX */
/** Flag indicating if recv_writer thread is active. */
-UNIV_INTERN bool recv_writer_thread_active = false;
+static volatile bool recv_writer_thread_active;
UNIV_INTERN os_thread_t recv_writer_thread_handle = 0;
#endif /* !UNIV_HOTBACKUP */
@@ -342,8 +343,6 @@ DECLARE_THREAD(recv_writer_thread)(
os_thread_pf(os_thread_get_curr_id()));
#endif /* UNIV_DEBUG_THREAD_CREATION */
- recv_writer_thread_active = true;
-
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
os_thread_sleep(100000);
@@ -2895,11 +2894,10 @@ recv_scan_log_recs(
recv_init_crash_recovery();
} else {
-
- ib_logf(IB_LOG_LEVEL_WARN,
- "Recovery skipped, "
- "--innodb-read-only set!");
-
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "innodb_read_only prevents"
+ " crash recovery");
+ recv_needed_recovery = TRUE;
return(TRUE);
}
}
@@ -3078,6 +3076,7 @@ recv_init_crash_recovery(void)
/* Spawn the background thread to flush dirty pages
from the buffer pools. */
+ recv_writer_thread_active = true;
recv_writer_thread_handle = os_thread_create(
recv_writer_thread, 0, 0);
}
@@ -3323,6 +3322,11 @@ recv_recovery_from_checkpoint_start_func(
/* Done with startup scan. Clear the flag. */
recv_log_scan_is_startup_type = FALSE;
+
+ if (srv_read_only_mode && recv_needed_recovery) {
+ return(DB_READ_ONLY);
+ }
+
if (TYPE_CHECKPOINT) {
/* NOTE: we always do a 'recovery' at startup, but only if
there is something wrong we will print a message to the
@@ -3473,15 +3477,6 @@ void
recv_recovery_from_checkpoint_finish(void)
/*======================================*/
{
- /* Apply the hashed log records to the respective file pages */
-
- if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
-
- recv_apply_hashed_log_recs(TRUE);
- }
-
- DBUG_PRINT("ib_log", ("apply completed"));
-
if (recv_needed_recovery) {
trx_sys_print_mysql_master_log_pos();
trx_sys_print_mysql_binlog_offset();
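The log0recv.cc hunks above make recv_writer_thread_active static volatile and set it in the creator, right before os_thread_create(), instead of inside the thread itself; that closes the window in which shutdown code could see the flag as false while the writer thread was already being spawned. A standalone sketch of the ordering; std::thread/std::atomic stand in for the InnoDB primitives, and the worker clearing the flag on exit is an assumption, not part of the hunk shown:

	#include <atomic>
	#include <thread>

	static std::atomic<bool> writer_active{false};

	static void writer_thread()
	{
		/* ... flush dirty pages ... */
		writer_active = false;		// assumed: worker clears the flag when done
	}

	void start_writer()
	{
		writer_active = true;		// set by the creator, before the thread exists
		std::thread(writer_thread).detach();
	}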
diff --git a/storage/xtradb/mach/mach0data.cc b/storage/xtradb/mach/mach0data.cc
index 206434dc5ab..feeedb01609 100644
--- a/storage/xtradb/mach/mach0data.cc
+++ b/storage/xtradb/mach/mach0data.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -55,7 +55,6 @@ mach_parse_compressed(
if (flag < 0x80UL) {
*val = flag;
return(ptr + 1);
-
}
/* Workaround GCC bug
@@ -64,7 +63,11 @@ mach_parse_compressed(
function, causing an out-of-bounds read if we are reading a short
integer close to the end of the buffer. */
#if defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__clang__)
- asm volatile("": : :"memory");
+#define DEPLOY_FENCE
+#endif
+
+#ifdef DEPLOY_FENCE
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
#endif
if (flag < 0xC0UL) {
@@ -75,8 +78,13 @@ mach_parse_compressed(
*val = mach_read_from_2(ptr) & 0x7FFFUL;
return(ptr + 2);
+ }
- } else if (flag < 0xE0UL) {
+#ifdef DEPLOY_FENCE
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#endif
+
+ if (flag < 0xE0UL) {
if (end_ptr < ptr + 3) {
return(NULL);
}
@@ -84,7 +92,13 @@ mach_parse_compressed(
*val = mach_read_from_3(ptr) & 0x3FFFFFUL;
return(ptr + 3);
- } else if (flag < 0xF0UL) {
+ }
+
+#ifdef DEPLOY_FENCE
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#endif
+
+ if (flag < 0xF0UL) {
if (end_ptr < ptr + 4) {
return(NULL);
}
@@ -92,14 +106,20 @@ mach_parse_compressed(
*val = mach_read_from_4(ptr) & 0x1FFFFFFFUL;
return(ptr + 4);
- } else {
- ut_ad(flag == 0xF0UL);
+ }
- if (end_ptr < ptr + 5) {
- return(NULL);
- }
+#ifdef DEPLOY_FENCE
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#endif
- *val = mach_read_from_4(ptr + 1);
- return(ptr + 5);
+#undef DEPLOY_FENCE
+
+ ut_ad(flag == 0xF0UL);
+
+ if (end_ptr < ptr + 5) {
+ return(NULL);
}
+
+ *val = mach_read_from_4(ptr + 1);
+ return(ptr + 5);
}
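The mach0data.cc hunk above restructures mach_parse_compressed() into a flat chain of if blocks separated by __atomic_thread_fence(__ATOMIC_ACQUIRE) calls; the fences are only a workaround for the GCC >= 5 reordering issue described in the comment and do not change the decoded values. The encoding itself, as visible from the masks, is a 1..5 byte big-endian format selected by the first byte. A standalone decoder sketch with illustrative helper names, not the mach_read_from_N() functions:

	#include <stddef.h>
	#include <stdint.h>

	static uint32_t read_be(const unsigned char* b, int n)
	{
		uint32_t v = 0;
		for (int i = 0; i < n; i++)
			v = (v << 8) | b[i];
		return v;
	}

	/* Returns the number of bytes consumed, or 0 if the buffer is too short. */
	size_t parse_compressed(const unsigned char* p, const unsigned char* end,
				uint32_t* val)
	{
		if (p >= end)
			return 0;

		uint32_t flag = p[0];
		size_t	 len  = flag < 0x80 ? 1 : flag < 0xC0 ? 2
			      : flag < 0xE0 ? 3 : flag < 0xF0 ? 4 : 5;

		if ((size_t) (end - p) < len)
			return 0;

		switch (len) {
		case 1:	 *val = flag;				break;
		case 2:	 *val = read_be(p, 2) & 0x7FFF;		break;
		case 3:	 *val = read_be(p, 3) & 0x3FFFFF;	break;
		case 4:	 *val = read_be(p, 4) & 0x1FFFFFFF;	break;
		default: *val = read_be(p + 1, 4);		break;	/* flag == 0xF0 */
		}
		return len;
	}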
diff --git a/storage/xtradb/mtr/mtr0mtr.cc b/storage/xtradb/mtr/mtr0mtr.cc
index 5ed0803727f..ed44fba9762 100644
--- a/storage/xtradb/mtr/mtr0mtr.cc
+++ b/storage/xtradb/mtr/mtr0mtr.cc
@@ -312,7 +312,6 @@ mtr_commit(
/*=======*/
mtr_t* mtr) /*!< in: mini-transaction */
{
- ut_ad(mtr);
ut_ad(mtr->magic_n == MTR_MAGIC_N);
ut_ad(mtr->state == MTR_ACTIVE);
ut_ad(!mtr->inside_ibuf);
diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc
index 817289352f2..2a651a84a1a 100644
--- a/storage/xtradb/os/os0file.cc
+++ b/storage/xtradb/os/os0file.cc
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
+Copyright (c) 2013, 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted
by Percona Inc.. Those modifications are
@@ -215,11 +216,15 @@ struct os_aio_array_t{
os_event_t not_full;
/*!< The event which is set to the
signaled state when there is space in
- the aio outside the ibuf segment */
+ the aio outside the ibuf segment;
+ os_event_set() and os_event_reset()
+ are protected by os_aio_array_t::mutex */
os_event_t is_empty;
/*!< The event which is set to the
signaled state when there are no
- pending i/os in this array */
+ pending i/os in this array;
+ os_event_set() and os_event_reset()
+ are protected by os_aio_array_t::mutex */
ulint n_slots;/*!< Total number of slots in the aio
array. This must be divisible by
n_threads. */
@@ -261,8 +266,8 @@ struct os_aio_array_t{
#define OS_AIO_IO_SETUP_RETRY_ATTEMPTS 5
#endif
-/** Array of events used in simulated aio */
-static os_event_t* os_aio_segment_wait_events = NULL;
+/** Array of events used in simulated aio. */
+static os_event_t* os_aio_segment_wait_events;
/** The aio arrays for non-ibuf i/o and ibuf i/o, as well as sync aio. These
are NULL when the module has not yet been initialized. @{ */
@@ -1103,50 +1108,15 @@ next_file:
char* full_path;
int ret;
struct stat statinfo;
-#ifdef HAVE_READDIR_R
- char dirent_buf[sizeof(struct dirent)
- + _POSIX_PATH_MAX + 100];
- /* In /mysys/my_lib.c, _POSIX_PATH_MAX + 1 is used as
- the max file name len; but in most standards, the
- length is NAME_MAX; we add 100 to be even safer */
-#endif
next_file:
-#ifdef HAVE_READDIR_R
- ret = readdir_r(dir, (struct dirent*) dirent_buf, &ent);
-
- if (ret != 0
-#ifdef UNIV_AIX
- /* On AIX, only if we got non-NULL 'ent' (result) value and
- a non-zero 'ret' (return) value, it indicates a failed
- readdir_r() call. An NULL 'ent' with an non-zero 'ret'
- would indicate the "end of the directory" is reached. */
- && ent != NULL
-#endif
- ) {
- fprintf(stderr,
- "InnoDB: cannot read directory %s, error %lu\n",
- dirname, (ulong) ret);
-
- return(-1);
- }
-
- if (ent == NULL) {
- /* End of directory */
-
- return(1);
- }
-
- ut_a(strlen(ent->d_name) < _POSIX_PATH_MAX + 100 - 1);
-#else
ent = readdir(dir);
if (ent == NULL) {
return(1);
}
-#endif
ut_a(strlen(ent->d_name) < OS_FILE_MAX_PATH);
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0) {
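The hunk above drops the readdir_r() branch and keeps only plain readdir(); readdir_r() is deprecated in modern glibc, and readdir() is safe as long as each DIR* is used from one thread at a time, which is the case for this scan. A standalone sketch of the resulting loop, including the errno check that distinguishes end-of-directory from an error (an illustrative function, not the InnoDB scanner):

	#include <dirent.h>
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int list_dir(const char* path)
	{
		DIR* dir = opendir(path);
		if (dir == NULL)
			return -1;

		struct dirent* ent;
		errno = 0;
		while ((ent = readdir(dir)) != NULL) {
			if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
				continue;
			printf("%s\n", ent->d_name);
			errno = 0;		/* reset before the next readdir() */
		}
		int err = errno;		/* non-zero only if readdir() failed */
		closedir(dir);
		return err ? -1 : 0;
	}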
@@ -1473,7 +1443,11 @@ os_file_set_nocache_if_needed(os_file_t file, const char* name,
&& (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT
|| (srv_unix_file_flush_method
== SRV_UNIX_O_DIRECT_NO_FSYNC))))
- os_file_set_nocache(file, name, mode_str);
+ /* Do fsync() on log files when setting O_DIRECT fails.
+ See log_io_complete() */
+ if (!os_file_set_nocache(file, name, mode_str)
+ && srv_unix_file_flush_method == SRV_UNIX_ALL_O_DIRECT)
+ srv_unix_file_flush_method = SRV_UNIX_O_DIRECT;
}
/****************************************************************//**
@@ -1644,9 +1618,10 @@ os_file_create_simple_no_error_handling_func(
}
/****************************************************************//**
-Tries to disable OS caching on an opened file descriptor. */
+Tries to disable OS caching on an opened file descriptor.
+@return TRUE if the operation succeeded, FALSE otherwise */
UNIV_INTERN
-void
+bool
os_file_set_nocache(
/*================*/
os_file_t fd /*!< in: file descriptor to alter */
@@ -1667,6 +1642,7 @@ os_file_set_nocache(
"Failed to set DIRECTIO_ON on file %s: %s: %s, "
"continuing anyway.",
file_name, operation_name, strerror(errno_save));
+ return false;
}
#elif defined(O_DIRECT)
if (fcntl(fd, F_SETFL, O_DIRECT) == -1) {
@@ -1697,8 +1673,10 @@ short_warning:
"continuing anyway.",
file_name, operation_name, strerror(errno_save));
}
+ return false;
}
#endif /* defined(UNIV_SOLARIS) && defined(DIRECTIO_ON) */
+ return true;
}
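
os_file_set_nocache() now reports failure, and the caller above downgrades SRV_UNIX_ALL_O_DIRECT to SRV_UNIX_O_DIRECT so that log writes still get an fsync(). A hedged sketch of that fallback with generic names (Linux O_DIRECT; the enum and helpers are illustrative, not the server's):

#include <fcntl.h>
#include <cstdio>

enum flush_method { FLUSH_ALL_O_DIRECT, FLUSH_O_DIRECT };

/* Try to enable direct I/O; report failure and keep OS caching. */
static bool
try_set_o_direct(int fd)
{
	if (fcntl(fd, F_SETFL, O_DIRECT) == -1) {
		perror("fcntl(F_SETFL, O_DIRECT)");
		return false;
	}

	return true;
}

static void
open_log_file(int fd, flush_method* method)
{
	if (!try_set_o_direct(fd) && *method == FLUSH_ALL_O_DIRECT) {
		/* Mirror the patch: downgrade so that log writes are
		followed by fsync(); see log_io_complete(). */
		*method = FLUSH_O_DIRECT;
	}
}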
@@ -2356,48 +2334,52 @@ os_file_set_size(
os_file_t file, /*!< in: handle to a file */
os_offset_t size) /*!< in: file size */
{
- os_offset_t current_size;
ibool ret;
byte* buf;
byte* buf2;
ulint buf_size;
- current_size = 0;
-
#ifdef HAVE_POSIX_FALLOCATE
if (srv_use_posix_fallocate) {
+ int err;
+ do {
+ err = posix_fallocate(file, 0, size);
+ } while (err == EINTR
+ && srv_shutdown_state == SRV_SHUTDOWN_NONE);
- if (posix_fallocate(file, current_size, size) == -1) {
-
- ib_logf(IB_LOG_LEVEL_ERROR, "preallocating file "
- "space for file \'%s\' failed. Current size "
- INT64PF ", desired size " INT64PF,
- name, current_size, size);
- os_file_handle_error_no_exit (name, "posix_fallocate",
- FALSE);
- return(FALSE);
+ if (err) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "preallocating " INT64PF " bytes for"
+ "file %s failed with error %d",
+ size, name, err);
}
- return(TRUE);
+ return(!err);
}
#endif
+#ifdef _WIN32
+ /* Write 1 page of zeroes at the desired end. */
+ buf_size = UNIV_PAGE_SIZE;
+ os_offset_t current_size = size - buf_size;
+#else
/* Write up to 1 megabyte at a time. */
buf_size = ut_min(64, (ulint) (size / UNIV_PAGE_SIZE))
* UNIV_PAGE_SIZE;
- buf2 = static_cast<byte*>(ut_malloc(buf_size + UNIV_PAGE_SIZE));
+ os_offset_t current_size = 0;
+#endif
+ buf2 = static_cast<byte*>(calloc(1, buf_size + UNIV_PAGE_SIZE));
+
+ if (!buf2) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot allocate " ULINTPF " bytes to extend file\n",
+ buf_size + UNIV_PAGE_SIZE);
+ return(FALSE);
+ }
/* Align the buffer for possible raw i/o */
buf = static_cast<byte*>(ut_align(buf2, UNIV_PAGE_SIZE));
- /* Write buffer full of zeros */
- memset(buf, 0, buf_size);
-
- if (size >= (os_offset_t) 100 << 20) {
-
- fprintf(stderr, "InnoDB: Progress in MB:");
- }
-
- while (current_size < size) {
+ do {
ulint n_bytes;
if (size - current_size < (os_offset_t) buf_size) {
@@ -2408,37 +2390,15 @@ os_file_set_size(
ret = os_file_write(name, file, buf, current_size, n_bytes);
if (!ret) {
- ut_free(buf2);
- goto error_handling;
- }
-
- /* Print about progress for each 100 MB written */
- if ((current_size + n_bytes) / (100 << 20)
- != current_size / (100 << 20)) {
-
- fprintf(stderr, " %lu00",
- (ulong) ((current_size + n_bytes)
- / (100 << 20)));
+ break;
}
current_size += n_bytes;
- }
+ } while (current_size < size);
- if (size >= (os_offset_t) 100 << 20) {
-
- fprintf(stderr, "\n");
- }
+ free(buf2);
- ut_free(buf2);
-
- ret = os_file_flush(file);
-
- if (ret) {
- return(TRUE);
- }
-
-error_handling:
- return(FALSE);
+ return(ret && os_file_flush(file));
}
/***********************************************************************//**
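
The rewritten os_file_set_size() retries posix_fallocate() while it returns EINTR and the server is not shutting down, and otherwise falls back to writing zero-filled chunks. A minimal sketch of the retry part, without the shutdown check:

#include <sys/types.h>
#include <fcntl.h>
#include <cerrno>
#include <cstdio>

/* posix_fallocate() returns the error number directly (it does not set
errno), so the loop keys off the return value; EINTR means "try again". */
static bool
preallocate(int fd, off_t size)
{
	int	err;

	do {
		err = posix_fallocate(fd, 0, size);
	} while (err == EINTR);

	if (err != 0) {
		fprintf(stderr,
			"preallocating %lld bytes failed with error %d\n",
			(long long) size, err);
	}

	return (err == 0);
}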
@@ -4240,13 +4200,6 @@ os_aio_init(
os_aio_validate();
- os_aio_segment_wait_events = static_cast<os_event_t*>(
- ut_malloc(n_segments * sizeof *os_aio_segment_wait_events));
-
- for (ulint i = 0; i < n_segments; ++i) {
- os_aio_segment_wait_events[i] = os_event_create();
- }
-
os_last_printout = ut_time();
#ifdef _WIN32
@@ -4256,8 +4209,18 @@ os_aio_init(
ut_a(completion_port && read_completion_port);
#endif
- return(TRUE);
+ if (srv_use_native_aio) {
+ return(TRUE);
+ }
+
+ os_aio_segment_wait_events = static_cast<os_event_t*>(
+ ut_malloc(n_segments * sizeof *os_aio_segment_wait_events));
+
+ for (ulint i = 0; i < n_segments; ++i) {
+ os_aio_segment_wait_events[i] = os_event_create();
+ }
+ return(TRUE);
}
/***********************************************************************
@@ -4285,8 +4248,10 @@ os_aio_free(void)
os_aio_array_free(os_aio_read_array);
- for (ulint i = 0; i < os_aio_n_segments; i++) {
- os_event_free(os_aio_segment_wait_events[i]);
+ if (!srv_use_native_aio) {
+ for (ulint i = 0; i < os_aio_n_segments; i++) {
+ os_event_free(os_aio_segment_wait_events[i]);
+ }
}
#if !defined(HAVE_ATOMIC_BUILTINS) || UNIV_WORD_SIZE < 8
@@ -4346,22 +4311,17 @@ os_aio_wake_all_threads_at_shutdown(void)
if (os_aio_log_array != 0) {
os_aio_array_wake_win_aio_at_shutdown(os_aio_log_array);
}
-
#elif defined(LINUX_NATIVE_AIO)
-
/* When using native AIO interface the io helper threads
wait on io_getevents with a timeout value of 500ms. At
each wake up these threads check the server status.
No need to do anything to wake them up. */
+#endif /* !WIN_ASYNC_AIO */
if (srv_use_native_aio) {
return;
}
- /* Fall through to simulated AIO handler wakeup if we are
- not using native AIO. */
-#endif /* !WIN_ASYNC_AIO */
-
/* This loop wakes up all simulated ai/o threads */
for (ulint i = 0; i < os_aio_n_segments; i++) {
@@ -4729,6 +4689,7 @@ os_aio_simulated_wake_handler_threads(void)
}
}
+#ifdef _WIN32
/**********************************************************************//**
This function can be called if one wants to post a batch of reads and
prefers an i/o-handler thread to handle them all at once later. You must
@@ -4736,15 +4697,14 @@ call os_aio_simulated_wake_handler_threads later to ensure the threads
are not left sleeping! */
UNIV_INTERN
void
-os_aio_simulated_put_read_threads_to_sleep(void)
-/*============================================*/
+os_aio_simulated_put_read_threads_to_sleep()
{
/* The idea of putting background IO threads to sleep is only for
Windows when using simulated AIO. Windows XP seems to schedule
background threads too eagerly to allow for coalescing during
readahead requests. */
-#ifdef __WIN__
+
os_aio_array_t* array;
if (srv_use_native_aio) {
@@ -4763,8 +4723,8 @@ readahead requests. */
os_event_reset(os_aio_segment_wait_events[i]);
}
}
-#endif /* __WIN__ */
}
+#endif /* _WIN32 */
#if defined(LINUX_NATIVE_AIO)
/*******************************************************************//**
@@ -5934,11 +5894,12 @@ os_aio_print(
srv_io_thread_op_info[i],
srv_io_thread_function[i]);
-#ifndef __WIN__
- if (os_aio_segment_wait_events[i]->is_set()) {
+#ifndef _WIN32
+ if (!srv_use_native_aio
+ && os_aio_segment_wait_events[i]->is_set()) {
fprintf(file, " ev set");
}
-#endif /* __WIN__ */
+#endif /* _WIN32 */
fprintf(file, "\n");
}
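
Taken together, the os_aio_init(), os_aio_free() and os_aio_print() hunks above establish one invariant: the per-segment wait events exist only when simulated AIO is in use. A rough illustration of that invariant with a plain flag-guarded array (stand-in types only, not the os_event API):

#include <cstddef>

/* Allocation, release and every reader of the array are guarded by the
same use_native_aio flag. */
struct simulated_aio_events {
	bool	use_native_aio;
	size_t	n_segments;
	int*	events;		/* placeholder for os_event_t[] */

	void init(bool native, size_t n) {
		use_native_aio = native;
		n_segments = n;
		events = native ? NULL : new int[n]();
	}

	void shutdown() {
		if (!use_native_aio) {
			delete[] events;
			events = NULL;
		}
	}
};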
diff --git a/storage/xtradb/os/os0thread.cc b/storage/xtradb/os/os0thread.cc
index af826027efc..cb45929cab6 100644
--- a/storage/xtradb/os/os0thread.cc
+++ b/storage/xtradb/os/os0thread.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -210,29 +210,32 @@ os_thread_create_func(
#endif
}
-/**
-Waits until the specified thread completes and joins it. Its return value is
-ignored.
-
-@param thread thread to join */
+/** Waits until the specified thread completes and joins it.
+Its return value is ignored.
+@param[in,out] thread thread to join */
UNIV_INTERN
void
os_thread_join(
os_thread_t thread)
{
- /*This function is currently only used to workaround glibc bug
+ /* This function is currently only used to workaround glibc bug
described in http://bugs.mysql.com/bug.php?id=82886
On Windows, no workarounds are necessary, all threads
are "detached" upon thread exit (handle is closed), so we do
nothing.
*/
-#ifndef _WIN32
- int ret MY_ATTRIBUTE((unused)) = pthread_join(thread, NULL);
+#ifdef __WIN__
+ /* Do nothing. */
+#else
+#ifdef UNIV_DEBUG
+ const int ret MY_ATTRIBUTE((unused)) =
+#endif /* UNIV_DEBUG */
+ pthread_join(thread, NULL);
- /* Waiting on already-quit threads is allowed */
+ /* Waiting on already-quit threads is allowed. */
ut_ad(ret == 0 || ret == ESRCH);
-#endif
+#endif /* __WIN__ */
}
/*****************************************************************//**
@@ -261,8 +264,9 @@ os_thread_exit(
#ifdef __WIN__
ExitThread((DWORD) exit_value);
#else
- if (detach)
+ if (detach) {
pthread_detach(pthread_self());
+ }
pthread_exit(exit_value);
#endif
}
diff --git a/storage/xtradb/page/page0page.cc b/storage/xtradb/page/page0page.cc
index 48c4b53aaa4..1a8271de1e4 100644
--- a/storage/xtradb/page/page0page.cc
+++ b/storage/xtradb/page/page0page.cc
@@ -1447,7 +1447,6 @@ page_dir_split_slot(
ulint i;
ulint n_owned;
- ut_ad(page);
ut_ad(!page_zip || page_is_comp(page));
ut_ad(slot_no > 0);
@@ -1509,7 +1508,6 @@ page_dir_balance_slot(
rec_t* old_rec;
rec_t* new_rec;
- ut_ad(page);
ut_ad(!page_zip || page_is_comp(page));
ut_ad(slot_no > 0);
diff --git a/storage/xtradb/page/page0zip.cc b/storage/xtradb/page/page0zip.cc
index 21008ff0ffe..0d2bb7fb986 100644
--- a/storage/xtradb/page/page0zip.cc
+++ b/storage/xtradb/page/page0zip.cc
@@ -4814,8 +4814,6 @@ page_zip_parse_compress(
ulint size;
ulint trailer_size;
- ut_ad(ptr != NULL);
- ut_ad(end_ptr != NULL);
ut_ad(!page == !page_zip);
if (UNIV_UNLIKELY(ptr + (2 + 2) > end_ptr)) {
diff --git a/storage/xtradb/rem/rem0rec.cc b/storage/xtradb/rem/rem0rec.cc
index f3de279e182..8daece64b68 100644
--- a/storage/xtradb/rem/rem0rec.cc
+++ b/storage/xtradb/rem/rem0rec.cc
@@ -789,7 +789,7 @@ rec_get_nth_field_offs_old(
/**********************************************************//**
Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
@return total size */
-UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(1,2)))
+UNIV_INLINE MY_ATTRIBUTE((warn_unused_result))
ulint
rec_get_converted_size_comp_prefix_low(
/*===================================*/
diff --git a/storage/xtradb/row/row0ftsort.cc b/storage/xtradb/row/row0ftsort.cc
index cb47d605623..134127c6adf 100644
--- a/storage/xtradb/row/row0ftsort.cc
+++ b/storage/xtradb/row/row0ftsort.cc
@@ -58,6 +58,8 @@ tokenized doc string. The index has three "fields":
integer value)
3) Word's position in original doc.
+@see fts_create_one_index_table()
+
@return dict_index_t structure for the fts sort index */
UNIV_INTERN
dict_index_t*
@@ -99,16 +101,12 @@ row_merge_create_fts_sort_index(
field->prefix_len = 0;
field->col = static_cast<dict_col_t*>(
mem_heap_alloc(new_index->heap, sizeof(dict_col_t)));
- field->col->len = FTS_MAX_WORD_LEN;
-
- if (strcmp(charset->name, "latin1_swedish_ci") == 0) {
- field->col->mtype = DATA_VARCHAR;
- } else {
- field->col->mtype = DATA_VARMYSQL;
- }
-
field->col->prtype = idx_field->col->prtype | DATA_NOT_NULL;
+ field->col->mtype = charset == &my_charset_latin1
+ ? DATA_VARCHAR : DATA_VARMYSQL;
field->col->mbminmaxlen = idx_field->col->mbminmaxlen;
+ field->col->len = HA_FT_MAXCHARLEN * DATA_MBMAXLEN(field->col->mbminmaxlen);
+
field->fixed_len = 0;
/* Doc ID */
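
The sort column is now sized from the character limit and the charset's maximum bytes per character instead of the fixed FTS_MAX_WORD_LEN. A worked example with illustrative numbers only (84 characters and 4 bytes per character are assumptions, roughly what HA_FT_MAXCHARLEN and a utf8mb4 column would give):

#include <cstdio>

int main()
{
	const unsigned max_chars          = 84;	/* stand-in for HA_FT_MAXCHARLEN */
	const unsigned max_bytes_per_char = 4;	/* e.g. a 4-byte charset */

	/* The sort column is sized in bytes, not in characters. */
	printf("sort column byte length = %u\n",
	       max_chars * max_bytes_per_char);	/* prints 336 */

	return 0;
}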
@@ -362,6 +360,7 @@ row_fts_free_pll_merge_buf(
/*********************************************************************//**
Tokenize incoming text data and add to the sort buffer.
+@see row_merge_buf_encode()
@return TRUE if the record passed, FALSE if out of space */
static
ibool
@@ -370,8 +369,6 @@ row_merge_fts_doc_tokenize(
row_merge_buf_t** sort_buf, /*!< in/out: sort buffer */
doc_id_t doc_id, /*!< in: Doc ID */
fts_doc_t* doc, /*!< in: Doc to be tokenized */
- dtype_t* word_dtype, /*!< in: data structure for
- word col */
merge_file_t** merge_file, /*!< in/out: merge file */
ibool opt_doc_id_size,/*!< in: whether to use 4 bytes
instead of 8 bytes integer to
@@ -403,7 +400,7 @@ row_merge_fts_doc_tokenize(
ulint idx = 0;
ib_uint32_t position;
ulint offset = 0;
- ulint cur_len = 0;
+ ulint cur_len;
doc_id_t write_doc_id;
inc = innobase_mysql_fts_get_token(
@@ -457,14 +454,34 @@ row_merge_fts_doc_tokenize(
dfield_set_data(field, t_str.f_str, t_str.f_len);
len = dfield_get_len(field);
- field->type.mtype = word_dtype->mtype;
- field->type.prtype = word_dtype->prtype | DATA_NOT_NULL;
+ dict_col_copy_type(dict_index_get_nth_col(buf->index, 0), &field->type);
+ field->type.prtype |= DATA_NOT_NULL;
+ ut_ad(len <= field->type.len);
- /* Variable length field, set to max size. */
- field->type.len = FTS_MAX_WORD_LEN;
- field->type.mbminmaxlen = word_dtype->mbminmaxlen;
+ /* For the temporary file, row_merge_buf_encode() uses
+ 1 byte for representing the number of extra_size bytes.
+ This number will always be 1, because for this 3-field index
+ consisting of one variable-size column, extra_size will always
+ be 1 or 2, which can be encoded in one byte.
+
+ The extra_size is 1 byte if the length of the
+ variable-length column is less than 128 bytes or the
+ maximum length is less than 256 bytes. */
+
+ /* One variable-length column: a word whose length is less than
+ fts_max_token_size. Add one byte for the extra_size count plus the
+ extra_size itself on top of the data.
+
+ Since the maximum FTS token length is now larger than 255, the
+ length prefix itself varies: a length below 128 bytes needs one
+ extra_size byte, anything longer needs two. */
+ if (len < 128 || field->type.len < 256) {
+ /* Extra size is one byte. */
+ cur_len = 2 + len;
+ } else {
+ /* Extra size is two bytes. */
+ cur_len = 3 + len;
+ }
- cur_len += len;
dfield_dup(field, buf->heap);
field++;
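
The cur_len computation above can be summarized as one count byte, one or two extra_size bytes, then the token itself. A small self-contained restatement:

#include <cstddef>

/* One byte for the count of extra_size bytes, one or two bytes of
extra_size, then the token bytes themselves. */
static size_t
estimated_record_size(size_t token_len, size_t max_field_len)
{
	const size_t	extra_size
		= (token_len < 128 || max_field_len < 256) ? 1 : 2;

	return (1 /* extra_size count */ + extra_size + token_len);
}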
@@ -514,20 +531,6 @@ row_merge_fts_doc_tokenize(
cur_len += len;
dfield_dup(field, buf->heap);
- /* One variable length column, word with its lenght less than
- fts_max_token_size, add one extra size and one extra byte.
-
- Since the max length for FTS token now is larger than 255,
- so we will need to signify length byte itself, so only 1 to 128
- bytes can be used for 1 bytes, larger than that 2 bytes. */
- if (t_str.f_len < 128) {
- /* Extra size is one byte. */
- cur_len += 2;
- } else {
- /* Extra size is two bytes. */
- cur_len += 3;
- }
-
/* Reserve one byte for the end marker of row_merge_block_t. */
if (buf->total_size + data_size[idx] + cur_len
>= srv_sort_buf_size - 1) {
@@ -620,8 +623,6 @@ fts_parallel_tokenization(
mem_heap_t* blob_heap = NULL;
fts_doc_t doc;
dict_table_t* table = psort_info->psort_common->new_table;
- dtype_t word_dtype;
- dict_field_t* idx_field;
fts_tokenize_ctx_t t_ctx;
ulint retried = 0;
dberr_t error = DB_SUCCESS;
@@ -643,13 +644,6 @@ fts_parallel_tokenization(
doc.charset = fts_index_get_charset(
psort_info->psort_common->dup->index);
- idx_field = dict_index_get_nth_field(
- psort_info->psort_common->dup->index, 0);
- word_dtype.prtype = idx_field->col->prtype;
- word_dtype.mbminmaxlen = idx_field->col->mbminmaxlen;
- word_dtype.mtype = (strcmp(doc.charset->name, "latin1_swedish_ci") == 0)
- ? DATA_VARCHAR : DATA_VARMYSQL;
-
block = psort_info->merge_block;
zip_size = dict_table_zip_size(table);
@@ -699,7 +693,6 @@ loop:
processed = row_merge_fts_doc_tokenize(
buf, doc_item->doc_id, &doc,
- &word_dtype,
merge_file, psort_info->psort_common->opt_doc_id_size,
&t_ctx);
diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc
index c1436f9e1b7..bcc37fb530c 100644
--- a/storage/xtradb/row/row0merge.cc
+++ b/storage/xtradb/row/row0merge.cc
@@ -952,14 +952,8 @@ row_merge_read_rec(
ulint data_size;
ulint avail_size;
- ut_ad(block);
- ut_ad(buf);
ut_ad(b >= &block[0]);
ut_ad(b < &block[srv_sort_buf_size]);
- ut_ad(index);
- ut_ad(foffs);
- ut_ad(mrec);
- ut_ad(offsets);
ut_ad(*offsets == 1 + REC_OFFS_HEADER_SIZE
+ dict_index_get_n_fields(index));
@@ -3833,8 +3827,8 @@ wait_again:
for (j = 0; j < FTS_NUM_AUX_INDEX;
j++) {
- os_thread_join(merge_info[j]
- .thread_hdl);
+ os_thread_join(merge_info[j]
+ .thread_hdl);
}
}
} else {
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index b366a55b2d9..68b4e1ecea5 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -1365,6 +1365,8 @@ run_again:
row_ins_step(thr);
+ DEBUG_SYNC_C("ib_after_row_insert_step");
+
err = trx->error_state;
if (err != DB_SUCCESS) {
diff --git a/storage/xtradb/row/row0purge.cc b/storage/xtradb/row/row0purge.cc
index bc2e0b0e1cb..35b3520749b 100644
--- a/storage/xtradb/row/row0purge.cc
+++ b/storage/xtradb/row/row0purge.cc
@@ -897,7 +897,7 @@ row_purge_record_func(
Fetches an undo log record and does the purge for the recorded operation.
If none left, or the current purge completed, returns the control to the
parent node, which is always a query thread node. */
-static MY_ATTRIBUTE((nonnull))
+static
void
row_purge(
/*======*/
diff --git a/storage/xtradb/row/row0upd.cc b/storage/xtradb/row/row0upd.cc
index e43a18a43af..3881fe410ec 100644
--- a/storage/xtradb/row/row0upd.cc
+++ b/storage/xtradb/row/row0upd.cc
@@ -1281,8 +1281,6 @@ row_upd_index_replace_new_col_vals_index_pos(
ulint n_fields;
const ulint zip_size = dict_table_zip_size(index->table);
- ut_ad(index);
-
dtuple_set_info_bits(entry, update->info_bits);
if (order_only) {
@@ -1467,8 +1465,6 @@ row_upd_changes_ord_field_binary_func(
ulint i;
const dict_index_t* clust_index;
- ut_ad(index);
- ut_ad(update);
ut_ad(thr);
ut_ad(thr->graph);
ut_ad(thr->graph->trx);
diff --git a/storage/xtradb/srv/srv0conc.cc b/storage/xtradb/srv/srv0conc.cc
index 0580d7d0054..dadb1f19a8a 100644
--- a/storage/xtradb/srv/srv0conc.cc
+++ b/storage/xtradb/srv/srv0conc.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -82,7 +83,9 @@ typedef UT_LIST_NODE_T(struct srv_conc_slot_t) srv_conc_node_t;
/** Slot for a thread waiting in the concurrency control queue. */
struct srv_conc_slot_t{
- os_event_t event; /*!< event to wait */
+ os_event_t event; /*!< event to wait for;
+ os_event_set() and os_event_reset()
+ are protected by srv_conc_mutex */
ibool reserved; /*!< TRUE if slot
reserved */
ibool wait_ended; /*!< TRUE when another thread has
@@ -381,11 +384,11 @@ srv_conc_exit_innodb_without_atomics(
}
}
- os_fast_mutex_unlock(&srv_conc_mutex);
-
if (slot != NULL) {
os_event_set(slot->event);
}
+
+ os_fast_mutex_unlock(&srv_conc_mutex);
}
/*********************************************************************//**
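
Moving os_event_set(slot->event) before the mutex release matches the new comment that set and reset of the slot event are protected by srv_conc_mutex. A rough pthreads sketch of the same "signal while holding the guarding mutex" pattern (pthread types are stand-ins for the InnoDB os_event/os_fast_mutex primitives):

#include <pthread.h>

/* mutex and cond are assumed to be initialized elsewhere. Signalling
while still holding the mutex that also guards the reset keeps the
set/reset ordering well defined for the waiter. */
struct waiter_slot {
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
	bool		signalled;
};

static void
wake_waiter(waiter_slot* slot)
{
	pthread_mutex_lock(&slot->mutex);
	slot->signalled = true;			/* "os_event_set()" */
	pthread_cond_signal(&slot->cond);	/* done under the mutex */
	pthread_mutex_unlock(&slot->mutex);
}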
diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc
index c04b446f480..a76c6aaf62b 100644
--- a/storage/xtradb/srv/srv0srv.cc
+++ b/storage/xtradb/srv/srv0srv.cc
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2014, SkySQL Ab. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation Ab. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -74,12 +74,6 @@ Created 10/8/1995 Heikki Tuuri
#include "mysql/plugin.h"
#include "mysql/service_thd_wait.h"
-/* prototypes of new functions added to ha_innodb.cc for kill_idle_transaction */
-ibool innobase_thd_is_idle(const void* thd);
-ib_int64_t innobase_thd_get_start_time(const void* thd);
-void innobase_thd_kill(ulong thd_id);
-ulong innobase_thd_get_thread_id(const void* thd);
-
/* prototypes for new functions added to ha_innodb.cc */
ibool innobase_get_slow_log();
@@ -440,11 +434,6 @@ starting from SRV_FORCE_IGNORE_CORRUPT, so that data can be recovered
by SELECT or mysqldump. When this is nonzero, we do not allow any user
modifications to the data. */
UNIV_INTERN ulong srv_force_recovery;
-#ifndef DBUG_OFF
-/** Inject a crash at different steps of the recovery process.
-This is for testing and debugging only. */
-UNIV_INTERN ulong srv_force_recovery_crash;
-#endif /* !DBUG_OFF */
/** Print all user-level transactions deadlocks to mysqld stderr */
@@ -473,6 +462,7 @@ this many index pages, there are 2 ways to calculate statistics:
table/index are not found in the innodb database */
UNIV_INTERN unsigned long long srv_stats_transient_sample_pages = 8;
UNIV_INTERN my_bool srv_stats_persistent = TRUE;
+UNIV_INTERN my_bool srv_stats_include_delete_marked = FALSE;
UNIV_INTERN unsigned long long srv_stats_persistent_sample_pages = 20;
UNIV_INTERN my_bool srv_stats_auto_recalc = TRUE;
@@ -747,7 +737,11 @@ struct srv_sys_t{
ulint n_sys_threads; /*!< size of the sys_threads
array */
- srv_slot_t* sys_threads; /*!< server thread table */
+ srv_slot_t* sys_threads; /*!< server thread table;
+ os_event_set() and
+ os_event_reset() on
+ sys_threads[]->event are
+ covered by srv_sys_t::mutex */
ulint n_threads_active[SRV_MASTER + 1];
/*!< number of threads active
@@ -769,13 +763,16 @@ UNIV_INTERN ib_mutex_t server_mutex;
static srv_sys_t* srv_sys = NULL;
-/** Event to signal the monitor thread. */
+/** Event to signal srv_monitor_thread. Not protected by a mutex.
+Set after setting srv_print_innodb_monitor. */
UNIV_INTERN os_event_t srv_monitor_event;
-/** Event to signal the error thread */
+/** Event to signal the shutdown of srv_error_monitor_thread.
+Not protected by a mutex. */
UNIV_INTERN os_event_t srv_error_event;
-/** Event to signal the buffer pool dump/load thread */
+/** Event for waking up buf_dump_thread. Not protected by a mutex.
+Set on shutdown or by buf_dump_start() or buf_load_start(). */
UNIV_INTERN os_event_t srv_buf_dump_event;
/** The buffer pool dump/load file name */
@@ -945,7 +942,6 @@ srv_suspend_thread_low(
/*===================*/
srv_slot_t* slot) /*!< in/out: thread slot */
{
-
ut_ad(!srv_read_only_mode);
ut_ad(srv_sys_mutex_own());
@@ -1003,34 +999,71 @@ srv_suspend_thread(
return(sig_count);
}
-/*********************************************************************//**
-Releases threads of the type given from suspension in the thread table.
-NOTE! The server mutex has to be reserved by the caller!
-@return number of threads released: this may be less than n if not
- enough threads were suspended at the moment. */
-UNIV_INTERN
-ulint
-srv_release_threads(
-/*================*/
- srv_thread_type type, /*!< in: thread type */
- ulint n) /*!< in: number of threads to release */
+/** Resume the calling thread.
+@param[in,out] slot thread slot
+@param[in] sig_count signal count (if wait)
+@param[in] wait whether to wait for the event
+@param[in] timeout_usec timeout in microseconds (0=infinite)
+@return whether the wait timed out */
+static
+bool
+srv_resume_thread(srv_slot_t* slot, ib_int64_t sig_count = 0, bool wait = true,
+ ulint timeout_usec = 0)
+{
+ bool timeout;
+
+ ut_ad(!srv_read_only_mode);
+ ut_ad(slot->in_use);
+ ut_ad(slot->suspended);
+
+ if (!wait) {
+ timeout = false;
+ } else if (timeout_usec) {
+ timeout = OS_SYNC_TIME_EXCEEDED == os_event_wait_time_low(
+ slot->event, timeout_usec, sig_count);
+ } else {
+ timeout = false;
+ os_event_wait_low(slot->event, sig_count);
+ }
+
+ srv_sys_mutex_enter();
+ ut_ad(slot->in_use);
+ ut_ad(slot->suspended);
+
+ slot->suspended = FALSE;
+ ++srv_sys->n_threads_active[slot->type];
+ srv_sys_mutex_exit();
+ return(timeout);
+}
+
+/** Ensure that a given number of threads of the type given are running
+(or are already terminated).
+@param[in] type thread type
+@param[in] n number of threads that have to run */
+void
+srv_release_threads(enum srv_thread_type type, ulint n)
{
- ulint i;
- ulint count = 0;
+ ulint running;
ut_ad(srv_thread_type_validate(type));
ut_ad(n > 0);
- srv_sys_mutex_enter();
+ do {
+ running = 0;
- for (i = 0; i < srv_sys->n_sys_threads; i++) {
- srv_slot_t* slot;
+ srv_sys_mutex_enter();
- slot = &srv_sys->sys_threads[i];
+ for (ulint i = 0; i < srv_sys->n_sys_threads; i++) {
+ srv_slot_t* slot = &srv_sys->sys_threads[i];
- if (slot->in_use
- && srv_slot_get_type(slot) == type
- && slot->suspended) {
+ if (!slot->in_use || srv_slot_get_type(slot) != type) {
+ continue;
+ } else if (!slot->suspended) {
+ if (++running >= n) {
+ break;
+ }
+ continue;
+ }
switch (type) {
case SRV_NONE:
@@ -1060,21 +1093,11 @@ srv_release_threads(
break;
}
- slot->suspended = FALSE;
-
- ++srv_sys->n_threads_active[type];
-
os_event_set(slot->event);
-
- if (++count == n) {
- break;
- }
}
- }
- srv_sys_mutex_exit();
-
- return(count);
+ srv_sys_mutex_exit();
+ } while (running && running < n);
}
/*********************************************************************//**
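
srv_resume_thread() above folds the three wait styles that used to be open-coded (no wait, untimed wait, timed wait reporting whether it timed out) into one helper, and srv_release_threads() now loops until enough threads of the given type are running. A sketch of the three-way wait selection using std::condition_variable as a stand-in for os_event:

#include <chrono>
#include <condition_variable>
#include <mutex>

/* Returns true only when a timed wait expired with the event unset. */
static bool
resume_after_wait(std::condition_variable& cv, std::mutex& m, bool& set,
		  bool wait, unsigned timeout_usec)
{
	std::unique_lock<std::mutex> lock(m);

	if (!wait) {
		return false;				/* just mark active */
	}

	if (timeout_usec == 0) {
		cv.wait(lock, [&] { return set; });	/* infinite wait */
		return false;
	}

	return !cv.wait_for(lock, std::chrono::microseconds(timeout_usec),
			    [&] { return set; });	/* true == timed out */
}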
@@ -1087,11 +1110,8 @@ srv_free_slot(
{
srv_sys_mutex_enter();
- if (!slot->suspended) {
- /* Mark the thread as inactive. */
- srv_suspend_thread_low(slot);
- }
-
+ /* Mark the thread as inactive. */
+ srv_suspend_thread_low(slot);
/* Free the slot for reuse. */
ut_ad(slot->in_use);
slot->in_use = FALSE;
@@ -1402,22 +1422,26 @@ srv_printf_innodb_monitor(
low level 135. Therefore we can reserve the latter mutex here without
a danger of a deadlock of threads. */
- mutex_enter(&dict_foreign_err_mutex);
+ if (!recv_recovery_on) {
- if (!srv_read_only_mode && ftell(dict_foreign_err_file) != 0L) {
- fputs("------------------------\n"
- "LATEST FOREIGN KEY ERROR\n"
- "------------------------\n", file);
- ut_copy_file(file, dict_foreign_err_file);
- }
+ mutex_enter(&dict_foreign_err_mutex);
- mutex_exit(&dict_foreign_err_mutex);
+ if (!srv_read_only_mode
+ && ftell(dict_foreign_err_file) != 0L) {
+ fputs("------------------------\n"
+ "LATEST FOREIGN KEY ERROR\n"
+ "------------------------\n", file);
+ ut_copy_file(file, dict_foreign_err_file);
+ }
+
+ mutex_exit(&dict_foreign_err_mutex);
+ }
/* Only if lock_print_info_summary proceeds correctly,
before we call the lock_print_info_all_transactions
to print all the lock information. IMPORTANT NOTE: This
function acquires the lock mutex on success. */
- ret = lock_print_info_summary(file, nowait);
+ ret = recv_recovery_on ? FALSE : lock_print_info_summary(file, nowait);
if (ret) {
if (trx_start_pos) {
@@ -1450,10 +1474,13 @@ srv_printf_innodb_monitor(
"--------\n", file);
os_aio_print(file);
- fputs("-------------------------------------\n"
- "INSERT BUFFER AND ADAPTIVE HASH INDEX\n"
- "-------------------------------------\n", file);
- ibuf_print(file);
+ if (!recv_recovery_on) {
+
+ fputs("-------------------------------------\n"
+ "INSERT BUFFER AND ADAPTIVE HASH INDEX\n"
+ "-------------------------------------\n", file);
+ ibuf_print(file);
+ }
fprintf(file,
@@ -1465,10 +1492,13 @@ srv_printf_innodb_monitor(
btr_cur_n_sea_old = btr_cur_n_sea;
btr_cur_n_non_sea_old = btr_cur_n_non_sea;
- fputs("---\n"
- "LOG\n"
- "---\n", file);
- log_print(file);
+ if (!recv_recovery_on) {
+
+ fputs("---\n"
+ "LOG\n"
+ "---\n", file);
+ log_print(file);
+ }
fputs("----------------------\n"
"BUFFER POOL AND MEMORY\n"
@@ -1563,8 +1593,9 @@ srv_printf_innodb_monitor(
? (recv_sys->addr_hash->n_cells * sizeof(hash_cell_t)) : 0),
recv_sys_subtotal);
+
fprintf(file, "Dictionary memory allocated " ULINTPF "\n",
- dict_sys->size);
+ dict_sys ? dict_sys->size : 0);
buf_print_io(file);
@@ -1672,6 +1703,10 @@ srv_printf_innodb_monitor(
mutex_exit(&srv_innodb_monitor_mutex);
fflush(file);
+#ifndef DBUG_OFF
+ srv_debug_monitor_printed = true;
+#endif
+
return(ret);
}
@@ -1982,6 +2017,12 @@ srv_export_innodb_status(void)
mutex_exit(&srv_innodb_monitor_mutex);
}
+#ifndef DBUG_OFF
+/** false before InnoDB monitor has been printed at least once, true
+afterwards */
+bool srv_debug_monitor_printed = false;
+#endif
+
/*********************************************************************//**
A thread which prints the info output by various InnoDB monitors.
@return a dummy parameter */
@@ -2266,36 +2307,6 @@ loop:
old_sema = sema;
}
- if (srv_kill_idle_transaction && trx_sys) {
- trx_t* trx;
- time_t now;
-rescan_idle:
- now = time(NULL);
- mutex_enter(&trx_sys->mutex);
- trx = UT_LIST_GET_FIRST(trx_sys->mysql_trx_list);
- while (trx) {
- if (trx->state == TRX_STATE_ACTIVE
- && trx->mysql_thd
- && innobase_thd_is_idle(trx->mysql_thd)) {
- ib_int64_t start_time = innobase_thd_get_start_time(trx->mysql_thd);
- ulong thd_id = innobase_thd_get_thread_id(trx->mysql_thd);
-
- if (trx->last_stmt_start != start_time) {
- trx->idle_start = now;
- trx->last_stmt_start = start_time;
- } else if (difftime(now, trx->idle_start)
- > srv_kill_idle_transaction) {
- /* kill the session */
- mutex_exit(&trx_sys->mutex);
- innobase_thd_kill(thd_id);
- goto rescan_idle;
- }
- }
- trx = UT_LIST_GET_NEXT(mysql_trx_list, trx);
- }
- mutex_exit(&trx_sys->mutex);
- }
-
/* Flush stderr so that a database user gets the output
to possible MySQL error file */
@@ -2451,10 +2462,8 @@ DECLARE_THREAD(srv_redo_log_follow_thread)(
} while (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE);
- srv_track_changed_pages = FALSE;
log_online_read_shutdown();
os_event_set(srv_redo_log_tracked_event);
- srv_redo_log_thread_started = false; /* Defensive, not required */
my_thread_end();
os_thread_exit(NULL);
@@ -2620,15 +2629,7 @@ srv_active_wake_master_thread(void)
if (slot->in_use) {
ut_a(srv_slot_get_type(slot) == SRV_MASTER);
-
- if (slot->suspended) {
-
- slot->suspended = FALSE;
-
- ++srv_sys->n_threads_active[SRV_MASTER];
-
- os_event_set(slot->event);
- }
+ os_event_set(slot->event);
}
srv_sys_mutex_exit();
@@ -3149,7 +3150,7 @@ suspend_thread:
manual also mentions this string in several places. */
srv_main_thread_op_info = "waiting for server activity";
- os_event_wait(slot->event);
+ srv_resume_thread(slot);
if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) {
os_thread_exit(NULL);
@@ -3271,8 +3272,7 @@ DECLARE_THREAD(srv_worker_thread)(
do {
srv_suspend_thread(slot);
-
- os_event_wait(slot->event);
+ srv_resume_thread(slot);
srv_current_thread_priority = srv_purge_thread_priority;
@@ -3412,8 +3412,6 @@ srv_purge_coordinator_suspend(
ib_int64_t sig_count = srv_suspend_thread(slot);
do {
- ulint ret;
-
rw_lock_x_lock(&purge_sys->latch);
purge_sys->running = false;
@@ -3422,32 +3420,11 @@ srv_purge_coordinator_suspend(
/* We don't wait right away on the the non-timed wait because
we want to signal the thread that wants to suspend purge. */
-
- if (stop) {
- os_event_wait_low(slot->event, sig_count);
- ret = 0;
- } else if (rseg_history_len <= trx_sys->rseg_history_len) {
- ret = os_event_wait_time_low(
- slot->event, SRV_PURGE_MAX_TIMEOUT, sig_count);
- } else {
- /* We don't want to waste time waiting, if the
- history list increased by the time we got here,
- unless purge has been stopped. */
- ret = 0;
- }
-
- srv_sys_mutex_enter();
-
- /* The thread can be in state !suspended after the timeout
- but before this check if another thread sent a wakeup signal. */
-
- if (slot->suspended) {
- slot->suspended = FALSE;
- ++srv_sys->n_threads_active[slot->type];
- ut_a(srv_sys->n_threads_active[slot->type] == 1);
- }
-
- srv_sys_mutex_exit();
+ const bool wait = stop
+ || rseg_history_len <= trx_sys->rseg_history_len;
+ const bool timeout = srv_resume_thread(
+ slot, sig_count, wait,
+ stop ? 0 : SRV_PURGE_MAX_TIMEOUT);
sig_count = srv_suspend_thread(slot);
@@ -3459,6 +3436,19 @@ srv_purge_coordinator_suspend(
if (!stop) {
ut_a(purge_sys->n_stop == 0);
purge_sys->running = true;
+
+ if (timeout
+ && rseg_history_len == trx_sys->rseg_history_len
+ && trx_sys->rseg_history_len < 5000) {
+ /* No new records were added since the
+ wait started. Simply wait for new
+ records. The magic number 5000 is an
+ approximation for the case where we
+ have cached UNDO log records which
+ prevent truncate of the UNDO
+ segments. */
+ stop = true;
+ }
} else {
ut_a(purge_sys->n_stop > 0);
@@ -3467,33 +3457,9 @@ srv_purge_coordinator_suspend(
}
rw_lock_x_unlock(&purge_sys->latch);
-
- if (ret == OS_SYNC_TIME_EXCEEDED) {
-
- /* No new records added since wait started then simply
- wait for new records. The magic number 5000 is an
- approximation for the case where we have cached UNDO
- log records which prevent truncate of the UNDO
- segments. */
-
- if (rseg_history_len == trx_sys->rseg_history_len
- && trx_sys->rseg_history_len < 5000) {
-
- stop = true;
- }
- }
-
} while (stop);
- srv_sys_mutex_enter();
-
- if (slot->suspended) {
- slot->suspended = FALSE;
- ++srv_sys->n_threads_active[slot->type];
- ut_a(srv_sys->n_threads_active[slot->type] == 1);
- }
-
- srv_sys_mutex_exit();
+ srv_resume_thread(slot, 0, false);
}
/*********************************************************************//**
@@ -3549,8 +3515,9 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
srv_purge_coordinator_suspend(slot, rseg_history_len);
}
+ ut_ad(!slot->suspended);
+
if (srv_purge_should_exit(n_total_purged)) {
- ut_a(!slot->suspended);
break;
}
@@ -3665,12 +3632,10 @@ srv_get_task_queue_length(void)
return(n_tasks);
}
-/**********************************************************************//**
-Wakeup the purge threads. */
+/** Wake up the purge threads. */
UNIV_INTERN
void
-srv_purge_wakeup(void)
-/*==================*/
+srv_purge_wakeup()
{
ut_ad(!srv_read_only_mode);
@@ -3685,4 +3650,3 @@ srv_purge_wakeup(void)
}
}
}
-
diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc
index 270489a1973..35b378ec831 100644
--- a/storage/xtradb/srv/srv0start.cc
+++ b/storage/xtradb/srv/srv0start.cc
@@ -613,19 +613,6 @@ create_log_file(
/** Initial number of the first redo log file */
#define INIT_LOG_FILE0 (SRV_N_LOG_FILES_MAX + 1)
-#ifdef DBUG_OFF
-# define RECOVERY_CRASH(x) do {} while(0)
-#else
-# define RECOVERY_CRASH(x) do { \
- if (srv_force_recovery_crash == x) { \
- fprintf(stderr, "innodb_force_recovery_crash=%lu\n", \
- srv_force_recovery_crash); \
- fflush(stderr); \
- exit(3); \
- } \
-} while (0)
-#endif
-
/*********************************************************************//**
Creates all log files.
@return DB_SUCCESS or error code */
@@ -666,13 +653,14 @@ create_log_files(
file should be recoverable. The buffer
pool was clean, and we can simply create
all log files from the scratch. */
- RECOVERY_CRASH(6);
+ DBUG_EXECUTE_IF("innodb_log_abort_6",
+ return(DB_ERROR););
}
}
ut_ad(!buf_pool_check_no_pending_io());
- RECOVERY_CRASH(7);
+ DBUG_EXECUTE_IF("innodb_log_abort_7", return(DB_ERROR););
for (unsigned i = 0; i < srv_n_log_files; i++) {
sprintf(logfilename + dirnamelen,
@@ -685,7 +673,7 @@ create_log_files(
}
}
- RECOVERY_CRASH(8);
+ DBUG_EXECUTE_IF("innodb_log_abort_8", return(DB_ERROR););
/* We did not create the first log file initially as
ib_logfile0, so that crash recovery cannot find it until it
@@ -740,10 +728,16 @@ create_log_files(
return(DB_SUCCESS);
}
-/*********************************************************************//**
-Renames the first log file. */
+/** Rename the first redo log file.
+@param[in,out] logfilename buffer for the log file name
+@param[in] dirnamelen length of the directory path
+@param[in] lsn FIL_PAGE_FILE_FLUSH_LSN value
+@param[in,out] logfile0 name of the first log file
+@return error code
+@retval DB_SUCCESS on successful operation */
+MY_ATTRIBUTE((warn_unused_result, nonnull))
static
-void
+dberr_t
create_log_files_rename(
/*====================*/
char* logfilename, /*!< in/out: buffer for log file name */
@@ -754,6 +748,9 @@ create_log_files_rename(
/* If innodb_flush_method=O_DSYNC,
we need to explicitly flush the log buffers. */
fil_flush(SRV_LOG_SPACE_FIRST_ID);
+
+ DBUG_EXECUTE_IF("innodb_log_abort_9", return(DB_ERROR););
+
/* Close the log files, so that we can rename
the first one. */
fil_close_log_files(false);
@@ -762,26 +759,28 @@ create_log_files_rename(
checkpoint has been created. */
sprintf(logfilename + dirnamelen, "ib_logfile%u", 0);
- RECOVERY_CRASH(9);
-
ib_logf(IB_LOG_LEVEL_INFO,
"Renaming log file %s to %s", logfile0, logfilename);
mutex_enter(&log_sys->mutex);
ut_ad(strlen(logfile0) == 2 + strlen(logfilename));
- ibool success = os_file_rename(
- innodb_file_log_key, logfile0, logfilename);
- ut_a(success);
-
- RECOVERY_CRASH(10);
+ dberr_t err = os_file_rename(
+ innodb_file_log_key, logfile0, logfilename)
+ ? DB_SUCCESS : DB_ERROR;
/* Replace the first file with ib_logfile0. */
strcpy(logfile0, logfilename);
mutex_exit(&log_sys->mutex);
- fil_open_log_and_system_tablespace_files();
+ DBUG_EXECUTE_IF("innodb_log_abort_10", err = DB_ERROR;);
- ib_logf(IB_LOG_LEVEL_WARN, "New log files created, LSN=" LSN_PF, lsn);
+ if (err == DB_SUCCESS) {
+ fil_open_log_and_system_tablespace_files();
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "New log files created, LSN=" LSN_PF, lsn);
+ }
+
+ return(err);
}
/*********************************************************************//**
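
The RECOVERY_CRASH(n) macro, which killed the process outright, is replaced above by named DBUG_EXECUTE_IF points such as "innodb_log_abort_7" that make the function return DB_ERROR instead. DBUG_EXECUTE_IF comes from the server's dbug library; the stand-in below only sketches its shape for readers unfamiliar with it:

#include <cstring>

/* A keyword enabled at run time turns on an extra block of code (in
the server this is DBUG_EXECUTE_IF together with the debug_dbug
option). */
static const char*	active_debug_keyword = 0;	/* set by a test */

#define EXECUTE_IF(keyword, code)				\
	do {							\
		if (active_debug_keyword != 0			\
		    && !strcmp(active_debug_keyword, keyword)) {\
			code					\
		}						\
	} while (0)

static int
create_log_files_step()
{
	EXECUTE_IF("innodb_log_abort_7", return -1;);

	return 0;	/* normal path */
}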
@@ -1613,8 +1612,6 @@ innobase_start_or_create_for_mysql(void)
lsn_t max_arch_log_no = LSN_MAX;
#endif /* UNIV_LOG_ARCHIVE */
ulint sum_of_new_sizes;
- ulint sum_of_data_file_sizes;
- ulint tablespace_size_in_header;
dberr_t err;
unsigned i;
ulint srv_n_log_files_found = srv_n_log_files;
@@ -1627,6 +1624,19 @@ innobase_start_or_create_for_mysql(void)
size_t dirnamelen;
bool sys_datafiles_created = false;
+ /* Check that os_fast_mutexes work as expected */
+ os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &srv_os_test_mutex);
+
+ ut_a(0 == os_fast_mutex_trylock(&srv_os_test_mutex));
+
+ os_fast_mutex_unlock(&srv_os_test_mutex);
+
+ os_fast_mutex_lock(&srv_os_test_mutex);
+
+ os_fast_mutex_unlock(&srv_os_test_mutex);
+
+ os_fast_mutex_free(&srv_os_test_mutex);
+
high_level_read_only = srv_read_only_mode
|| srv_force_recovery > SRV_FORCE_NO_TRX_UNDO;
@@ -1669,22 +1679,7 @@ innobase_start_or_create_for_mysql(void)
#endif /* PAGE_ATOMIC_REF_COUNT */
);
-
- if (sizeof(ulint) != sizeof(void*)) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: size of InnoDB's ulint is %lu, "
- "but size of void*\n", (ulong) sizeof(ulint));
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: is %lu. The sizes should be the same "
- "so that on a 64-bit\n",
- (ulong) sizeof(void*));
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: platforms you can allocate more than 4 GB "
- "of memory.\n");
- }
+ compile_time_assert(sizeof(ulint) == sizeof(void*));
/* If stacktrace is used we set up signal handler for SIGUSR2 signal
here. If signal handler set fails we report that and disable
@@ -2108,6 +2103,7 @@ innobase_start_or_create_for_mysql(void)
fsp_init();
log_init();
+ log_online_init();
lock_sys_create(srv_lock_table_size);
@@ -2282,14 +2278,18 @@ innobase_start_or_create_for_mysql(void)
dirnamelen, max_flushed_lsn,
logfile0);
+ if (err == DB_SUCCESS) {
+ err = create_log_files_rename(
+ logfilename,
+ dirnamelen,
+ max_flushed_lsn,
+ logfile0);
+ }
+
if (err != DB_SUCCESS) {
return(err);
}
- create_log_files_rename(
- logfilename, dirnamelen,
- max_flushed_lsn, logfile0);
-
/* Suppress the message about
crash recovery. */
max_flushed_lsn = min_flushed_lsn
@@ -2414,8 +2414,9 @@ files_checked:
trx_sys_create();
- if (create_new_db) {
+ bool srv_monitor_thread_started = false;
+ if (create_new_db) {
ut_a(!srv_read_only_mode);
init_log_online();
@@ -2459,8 +2460,12 @@ files_checked:
fil_flush_file_spaces(FIL_TABLESPACE);
- create_log_files_rename(logfilename, dirnamelen,
- max_flushed_lsn, logfile0);
+ err = create_log_files_rename(logfilename, dirnamelen,
+ max_flushed_lsn, logfile0);
+
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
} else {
/* Check if we support the max format that is stamped
@@ -2489,6 +2494,22 @@ files_checked:
and there must be no page in the buf_flush list. */
buf_pool_invalidate();
+ /* Start monitor thread early enough so that e.g. crash
+ recovery failing to find free pages in the buffer pool is
+ diagnosed. */
+ if (!srv_read_only_mode)
+ {
+ /* Create the thread which prints InnoDB monitor
+ info */
+ thread_handles[4 + SRV_MAX_N_IO_THREADS] =
+ os_thread_create(
+ srv_monitor_thread,
+ NULL,
+ thread_ids + 4 + SRV_MAX_N_IO_THREADS);
+
+ thread_started[4 + SRV_MAX_N_IO_THREADS] = true;
+ }
+
/* We always try to do a recovery, even if the database had
been shut down normally: this is the normal startup path */
@@ -2497,25 +2518,92 @@ files_checked:
min_flushed_lsn, max_flushed_lsn);
if (err != DB_SUCCESS) {
-
- return(DB_ERROR);
+ return(err);
}
init_log_online();
- /* Since the insert buffer init is in dict_boot, and the
- insert buffer is needed in any disk i/o, first we call
- dict_boot(). Note that trx_sys_init_at_db_start() only needs
- to access space 0, and the insert buffer at this stage already
- works for space 0. */
-
+ /* Initialize the change buffer. */
err = dict_boot();
if (err != DB_SUCCESS) {
return(err);
}
+ if (!srv_read_only_mode) {
+ if (sum_of_new_sizes > 0) {
+ /* New data file(s) were added */
+ mtr_start(&mtr);
+ fsp_header_inc_size(0, sum_of_new_sizes, &mtr);
+ mtr_commit(&mtr);
+ /* Immediately write the log record about
+ increased tablespace size to disk, so that it
+ is durable even if mysqld would crash
+ quickly */
+ log_buffer_flush_to_disk();
+ }
+ }
+
+ const ulint tablespace_size_in_header
+ = fsp_header_get_tablespace_size();
+
+#ifdef UNIV_DEBUG
+ /* buf_debug_prints = TRUE; */
+#endif /* UNIV_DEBUG */
+ ulint sum_of_data_file_sizes = 0;
+
+ for (ulint d = 0; d < srv_n_data_files; d++) {
+ sum_of_data_file_sizes += srv_data_file_sizes[d];
+ }
+
+ /* Compare the system tablespace file size to what is
+ stored in FSP_SIZE. In open_or_create_data_files()
+ we already checked that the file sizes match the
+ innodb_data_file_path specification. */
+ if (srv_read_only_mode
+ || sum_of_data_file_sizes == tablespace_size_in_header) {
+ /* Do not complain about the size. */
+ } else if (!srv_auto_extend_last_data_file
+ || sum_of_data_file_sizes
+ < tablespace_size_in_header) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Tablespace size stored in header is " ULINTPF
+ " pages, but the sum of data file sizes is "
+ ULINTPF " pages",
+ tablespace_size_in_header,
+ sum_of_data_file_sizes);
+
+ if (srv_force_recovery == 0
+ && sum_of_data_file_sizes
+ < tablespace_size_in_header) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot start InnoDB. The tail of"
+ " the system tablespace is"
+ " missing. Have you edited"
+ " innodb_data_file_path in my.cnf"
+ " in an inappropriate way, removing"
+ " data files from there?"
+ " You can set innodb_force_recovery=1"
+ " in my.cnf to force"
+ " a startup if you are trying to"
+ " recover a badly corrupt database.");
+
+ return(DB_ERROR);
+ }
+ }
+
+ /* This must precede recv_apply_hashed_log_recs(TRUE). */
ib_bh = trx_sys_init_at_db_start();
+
+ if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
+ /* Apply the hashed log records to the
+ respective file pages, for the last batch of
+ recv_group_scan_log_recs(). */
+
+ recv_apply_hashed_log_recs(TRUE);
+ DBUG_PRINT("ib_log", ("apply completed"));
+ }
+
n_recovered_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list);
/* The purge system needs to create the purge view and
@@ -2584,7 +2672,8 @@ files_checked:
ULINT_MAX, LSN_MAX, NULL);
ut_a(success);
- RECOVERY_CRASH(1);
+ DBUG_EXECUTE_IF("innodb_log_abort_1",
+ return(DB_ERROR););
min_flushed_lsn = max_flushed_lsn = log_get_lsn();
@@ -2599,8 +2688,6 @@ files_checked:
buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
- RECOVERY_CRASH(2);
-
/* Flush the old log files. */
log_buffer_flush_to_disk();
/* If innodb_flush_method=O_DSYNC,
@@ -2615,21 +2702,27 @@ files_checked:
ut_d(recv_no_log_write = TRUE);
ut_ad(!buf_pool_check_no_pending_io());
- RECOVERY_CRASH(3);
+ DBUG_EXECUTE_IF("innodb_log_abort_3",
+ return(DB_ERROR););
/* Stamp the LSN to the data files. */
fil_write_flushed_lsn_to_data_files(
max_flushed_lsn, 0);
- fil_flush_file_spaces(FIL_TABLESPACE);
+ DBUG_EXECUTE_IF("innodb_log_abort_4", err = DB_ERROR;);
- RECOVERY_CRASH(4);
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+
+ fil_flush_file_spaces(FIL_TABLESPACE);
/* Close and free the redo log files, so that
we can replace them. */
fil_close_log_files(true);
- RECOVERY_CRASH(5);
+ DBUG_EXECUTE_IF("innodb_log_abort_5",
+ return(DB_ERROR););
/* Free the old log file space. */
log_group_close_all();
@@ -2653,8 +2746,11 @@ files_checked:
fil_write_flushed_lsn_to_data_files(min_flushed_lsn, 0);
fil_flush_file_spaces(FIL_TABLESPACE);
- create_log_files_rename(logfilename, dirnamelen,
- log_get_lsn(), logfile0);
+ err = create_log_files_rename(logfilename, dirnamelen,
+ log_get_lsn(), logfile0);
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
}
srv_startup_is_before_trx_rollback_phase = FALSE;
@@ -2668,20 +2764,8 @@ files_checked:
trx_sys_file_format_tag_init();
}
- if (!create_new_db && sum_of_new_sizes > 0) {
- /* New data file(s) were added */
- mtr_start(&mtr);
-
- fsp_header_inc_size(0, sum_of_new_sizes, &mtr);
-
- mtr_commit(&mtr);
-
- /* Immediately write the log record about increased tablespace
- size to disk, so that it is durable even if mysqld would crash
- quickly */
-
- log_buffer_flush_to_disk();
- }
+ ut_ad(err == DB_SUCCESS);
+ ut_a(sum_of_new_sizes != ULINT_UNDEFINED);
#ifdef UNIV_LOG_ARCHIVE
if (!srv_read_only_mode) {
@@ -2760,10 +2844,13 @@ files_checked:
thread_started[3 + SRV_MAX_N_IO_THREADS] = true;
/* Create the thread which prints InnoDB monitor info */
- thread_handles[4 + SRV_MAX_N_IO_THREADS] = os_thread_create(
- srv_monitor_thread,
- NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS);
- thread_started[4 + SRV_MAX_N_IO_THREADS] = true;
+ if (!thread_started[4 + SRV_MAX_N_IO_THREADS]) {
+ /* srv_monitor_thread not yet started */
+ thread_handles[4 + SRV_MAX_N_IO_THREADS] = os_thread_create(
+ srv_monitor_thread,
+ NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS);
+ thread_started[4 + SRV_MAX_N_IO_THREADS] = true;
+ }
}
/* Create the SYS_FOREIGN and SYS_FOREIGN_COLS system tables */
@@ -2830,125 +2917,6 @@ files_checked:
buf_flush_lru_manager_thread_handle = os_thread_create(buf_flush_lru_manager_thread, NULL, NULL);
buf_flush_lru_manager_thread_started = true;
-#ifdef UNIV_DEBUG
- /* buf_debug_prints = TRUE; */
-#endif /* UNIV_DEBUG */
- sum_of_data_file_sizes = 0;
-
- for (i = 0; i < srv_n_data_files; i++) {
- sum_of_data_file_sizes += srv_data_file_sizes[i];
- }
-
- tablespace_size_in_header = fsp_header_get_tablespace_size();
-
- if (!srv_read_only_mode
- && !srv_auto_extend_last_data_file
- && sum_of_data_file_sizes != tablespace_size_in_header) {
-
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: tablespace size"
- " stored in header is %lu pages, but\n",
- (ulong) tablespace_size_in_header);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- "InnoDB: the sum of data file sizes is %lu pages\n",
- (ulong) sum_of_data_file_sizes);
-
- if (srv_force_recovery == 0
- && sum_of_data_file_sizes < tablespace_size_in_header) {
- /* This is a fatal error, the tail of a tablespace is
- missing */
-
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Cannot start InnoDB."
- " The tail of the system tablespace is\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: missing. Have you edited"
- " innodb_data_file_path in my.cnf in an\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: inappropriate way, removing"
- " ibdata files from there?\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: You can set innodb_force_recovery=1"
- " in my.cnf to force\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: a startup if you are trying"
- " to recover a badly corrupt database.\n");
-
- return(DB_ERROR);
- }
- }
-
- if (!srv_read_only_mode
- && srv_auto_extend_last_data_file
- && sum_of_data_file_sizes < tablespace_size_in_header) {
-
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: tablespace size stored in header"
- " is %lu pages, but\n",
- (ulong) tablespace_size_in_header);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: the sum of data file sizes"
- " is only %lu pages\n",
- (ulong) sum_of_data_file_sizes);
-
- if (srv_force_recovery == 0) {
-
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Cannot start InnoDB. The tail of"
- " the system tablespace is\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: missing. Have you edited"
- " innodb_data_file_path in my.cnf in an\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: inappropriate way, removing"
- " ibdata files from there?\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: You can set innodb_force_recovery=1"
- " in my.cnf to force\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: a startup if you are trying to"
- " recover a badly corrupt database.\n");
-
- return(DB_ERROR);
- }
- }
-
- /* Check that os_fast_mutexes work as expected */
- os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &srv_os_test_mutex);
-
- if (0 != os_fast_mutex_trylock(&srv_os_test_mutex)) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: pthread_mutex_trylock returns"
- " an unexpected value on\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: success! Cannot continue.\n");
- exit(1);
- }
-
- os_fast_mutex_unlock(&srv_os_test_mutex);
-
- os_fast_mutex_lock(&srv_os_test_mutex);
-
- os_fast_mutex_unlock(&srv_os_test_mutex);
-
- os_fast_mutex_free(&srv_os_test_mutex);
-
if (!srv_file_per_table && srv_pass_corrupt_table) {
fprintf(stderr, "InnoDB: Warning:"
" The option innodb_file_per_table is disabled,"
@@ -3200,6 +3168,7 @@ innobase_shutdown_for_mysql(void)
btr_search_disable();
ibuf_close();
+ log_online_shutdown();
log_shutdown();
trx_sys_file_format_close();
trx_sys_close();
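
Earlier in this file's diff, the run-time check that ulint is pointer-sized becomes compile_time_assert(sizeof(ulint) == sizeof(void*)). The same idea expressed with standard C++11 static_assert; my_ulint is an illustrative stand-in, not the server's ulint:

/* The build fails instead of a check running at server startup. */
typedef unsigned long int	my_ulint;

static_assert(sizeof(my_ulint) == sizeof(void*),
	      "my_ulint must be the size of a pointer");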
diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc
index e705ed4f587..ca937df4475 100644
--- a/storage/xtradb/sync/sync0sync.cc
+++ b/storage/xtradb/sync/sync0sync.cc
@@ -2,6 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -1349,7 +1350,7 @@ sync_thread_add_level(
case SYNC_TRX_UNDO_PAGE:
/* Purge is allowed to read in as many UNDO pages as it likes,
there was a bogus rule here earlier that forced the caller to
- acquire the purge_sys_t::mutex. The purge mutex did not really
+ acquire the trx_purge_t::mutex. The purge mutex did not really
protect anything because it was only ever acquired by the
single purge thread. The purge thread can read the UNDO pages
without any covering mutex. */
diff --git a/storage/xtradb/trx/trx0purge.cc b/storage/xtradb/trx/trx0purge.cc
index d9e40c5d6f5..6b7b7df4cd8 100644
--- a/storage/xtradb/trx/trx0purge.cc
+++ b/storage/xtradb/trx/trx0purge.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -172,13 +173,9 @@ trx_purge_sys_close(void)
sess_close(purge_sys->sess);
- purge_sys->sess = NULL;
-
read_view_free(purge_sys->prebuilt_view);
read_view_free(purge_sys->prebuilt_clone);
- purge_sys->view = NULL;
-
rw_lock_free(&purge_sys->latch);
mutex_free(&purge_sys->bh_mutex);
@@ -187,9 +184,6 @@ trx_purge_sys_close(void)
ib_bh_free(purge_sys->ib_bh);
os_event_free(purge_sys->event);
-
- purge_sys->event = NULL;
-
mem_free(purge_sys);
purge_sys = NULL;
@@ -1306,20 +1300,16 @@ void
trx_purge_stop(void)
/*================*/
{
- purge_state_t state;
- ib_int64_t sig_count = os_event_reset(purge_sys->event);
-
ut_a(srv_n_purge_threads > 0);
rw_lock_x_lock(&purge_sys->latch);
- ut_a(purge_sys->state != PURGE_STATE_INIT);
- ut_a(purge_sys->state != PURGE_STATE_EXIT);
- ut_a(purge_sys->state != PURGE_STATE_DISABLED);
+ const ib_int64_t sig_count = os_event_reset(purge_sys->event);
+ const purge_state_t state = purge_sys->state;
- ++purge_sys->n_stop;
+ ut_a(state == PURGE_STATE_RUN || state == PURGE_STATE_STOP);
- state = purge_sys->state;
+ ++purge_sys->n_stop;
if (state == PURGE_STATE_RUN) {
ib_logf(IB_LOG_LEVEL_INFO, "Stopping purge");
diff --git a/storage/xtradb/trx/trx0sys.cc b/storage/xtradb/trx/trx0sys.cc
index 6f524718052..5126711fb82 100644
--- a/storage/xtradb/trx/trx0sys.cc
+++ b/storage/xtradb/trx/trx0sys.cc
@@ -1333,7 +1333,9 @@ trx_sys_close(void)
ut_a(UT_LIST_GET_LEN(trx_sys->ro_trx_list) == 0);
/* Only prepared transactions may be left in the system. Free them. */
- ut_a(UT_LIST_GET_LEN(trx_sys->rw_trx_list) == trx_sys->n_prepared_trx);
+ ut_a(UT_LIST_GET_LEN(trx_sys->rw_trx_list) == trx_sys->n_prepared_trx
+ || srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
while ((trx = UT_LIST_GET_FIRST(trx_sys->rw_trx_list)) != NULL) {
trx_free_prepared(trx);
@@ -1379,6 +1381,33 @@ trx_sys_close(void)
trx_sys = NULL;
}
+/** @brief Convert an undo log to TRX_UNDO_PREPARED state on shutdown.
+
+If any prepared ACTIVE transactions exist, and their rollback was
+prevented by innodb_force_recovery, we convert these transactions to
+XA PREPARE state in the main-memory data structures, so that shutdown
+will proceed normally. These transactions will again recover as ACTIVE
+on the next restart, and they will be rolled back unless
+innodb_force_recovery prevents it again.
+
+@param[in] trx transaction
+@param[in,out] undo undo log to convert to TRX_UNDO_PREPARED */
+static
+void
+trx_undo_fake_prepared(
+ const trx_t* trx,
+ trx_undo_t* undo)
+{
+ ut_ad(srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
+ ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE));
+ ut_ad(trx->is_recovered);
+
+ if (undo != NULL) {
+ ut_ad(undo->state == TRX_UNDO_ACTIVE);
+ undo->state = TRX_UNDO_PREPARED;
+ }
+}
+
/*********************************************************************
Check if there are any active (non-prepared) transactions.
@return total number of active transactions or 0 if none */
@@ -1387,15 +1416,42 @@ ulint
trx_sys_any_active_transactions(void)
/*=================================*/
{
- ulint total_trx = 0;
-
mutex_enter(&trx_sys->mutex);
- total_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list)
- + UT_LIST_GET_LEN(trx_sys->mysql_trx_list);
+ ulint total_trx = UT_LIST_GET_LEN(trx_sys->mysql_trx_list);
+
+ if (total_trx == 0) {
+ total_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list);
+ ut_a(total_trx >= trx_sys->n_prepared_trx);
+
+ if (total_trx > trx_sys->n_prepared_trx
+ && srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO) {
+ for (trx_t* trx = UT_LIST_GET_FIRST(
+ trx_sys->rw_trx_list);
+ trx != NULL;
+ trx = UT_LIST_GET_NEXT(trx_list, trx)) {
+ if (!trx_state_eq(trx, TRX_STATE_ACTIVE)
+ || !trx->is_recovered) {
+ continue;
+ }
+ /* This was a recovered transaction
+ whose rollback was disabled by
+ the innodb_force_recovery setting.
+ Pretend that it is in XA PREPARE
+ state so that shutdown will work. */
+ trx_undo_fake_prepared(
+ trx, trx->insert_undo);
+ trx_undo_fake_prepared(
+ trx, trx->update_undo);
+ trx->state = TRX_STATE_PREPARED;
+ trx_sys->n_prepared_trx++;
+ trx_sys->n_prepared_recovered_trx++;
+ }
+ }
- ut_a(total_trx >= trx_sys->n_prepared_trx);
- total_trx -= trx_sys->n_prepared_trx;
+ ut_a(total_trx >= trx_sys->n_prepared_trx);
+ total_trx -= trx_sys->n_prepared_trx;
+ }
mutex_exit(&trx_sys->mutex);
diff --git a/storage/xtradb/trx/trx0trx.cc b/storage/xtradb/trx/trx0trx.cc
index 97d1f264235..07e122e54ee 100644
--- a/storage/xtradb/trx/trx0trx.cc
+++ b/storage/xtradb/trx/trx0trx.cc
@@ -476,7 +476,11 @@ trx_free_prepared(
/*==============*/
trx_t* trx) /*!< in, own: trx object */
{
- ut_a(trx_state_eq(trx, TRX_STATE_PREPARED));
+ ut_a(trx_state_eq(trx, TRX_STATE_PREPARED)
+ || (trx_state_eq(trx, TRX_STATE_ACTIVE)
+ && trx->is_recovered
+ && (srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO)));
ut_a(trx->magic_n == TRX_MAGIC_N);
lock_trx_release_locks(trx);
diff --git a/storage/xtradb/trx/trx0undo.cc b/storage/xtradb/trx/trx0undo.cc
index cdd23726f2e..220589dd9ff 100644
--- a/storage/xtradb/trx/trx0undo.cc
+++ b/storage/xtradb/trx/trx0undo.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2011,13 +2012,37 @@ trx_undo_free_prepared(
ut_ad(srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS);
if (trx->update_undo) {
- ut_a(trx->update_undo->state == TRX_UNDO_PREPARED);
+ switch (trx->update_undo->state) {
+ case TRX_UNDO_PREPARED:
+ break;
+ case TRX_UNDO_ACTIVE:
+ /* lock_trx_release_locks() assigns
+ trx->is_recovered=false */
+ ut_a(srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
+ break;
+ default:
+ ut_error;
+ }
+
UT_LIST_REMOVE(undo_list, trx->rseg->update_undo_list,
trx->update_undo);
trx_undo_mem_free(trx->update_undo);
}
if (trx->insert_undo) {
- ut_a(trx->insert_undo->state == TRX_UNDO_PREPARED);
+ switch (trx->insert_undo->state) {
+ case TRX_UNDO_PREPARED:
+ break;
+ case TRX_UNDO_ACTIVE:
+ /* lock_trx_release_locks() assigns
+ trx->is_recovered=false */
+ ut_a(srv_read_only_mode
+ || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
+ break;
+ default:
+ ut_error;
+ }
+
UT_LIST_REMOVE(undo_list, trx->rseg->insert_undo_list,
trx->insert_undo);
trx_undo_mem_free(trx->insert_undo);