author     Marko Mäkelä <marko.makela@mariadb.com>    2017-03-08 19:44:22 +0200
committer  Marko Mäkelä <marko.makela@mariadb.com>    2017-03-09 08:53:08 +0200
commit     ad0c218a440575fa6fb6634aca7a08448a4360e0 (patch)
tree       b52811847ce51c92eabdeed3104df8b0168943df /storage
parent     bb4ef470c24cdbcedba3dd3dcda3b3d88b6fb491 (diff)
parent     9fe92a9770a801c4cd36390620486be4cb06752b (diff)
Merge 10.0 into 10.1
Also, implement MDEV-11027 a little differently from 5.5 and 10.0:

recv_apply_hashed_log_recs(): Change the return type back to void
(DB_SUCCESS was always returned). Report progress also via systemd
using sd_notifyf().
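For readers unfamiliar with the systemd call named above, the sketch below shows how progress reporting with sd_notifyf() typically looks. It is a minimal, hypothetical illustration only; the function and counter names are assumptions, not the actual recv_apply_hashed_log_recs() code in log0recv.cc.

/* Minimal sketch (assumed names, not MariaDB's actual code): report
   redo-log apply progress to systemd, as the commit message describes
   for recv_apply_hashed_log_recs(). */
#include <systemd/sd-daemon.h>

static void report_recovery_progress(unsigned batch, unsigned total_batches)
{
  /* STATUS= sets the text shown by "systemctl status mariadb"; passing 0
     as the first argument keeps $NOTIFY_SOCKET set for later updates. */
  sd_notifyf(0, "STATUS=InnoDB: Applying batch of redo log records %u/%u",
             batch, total_batches);
}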
Diffstat (limited to 'storage')
-rw-r--r--  storage/connect/CMakeLists.txt | 10
-rw-r--r--  storage/connect/array.cpp | 108
-rw-r--r--  storage/connect/array.h | 6
-rw-r--r--  storage/connect/colblk.cpp | 16
-rw-r--r--  storage/connect/connect.cc | 77
-rw-r--r--  storage/connect/domdoc.cpp | 11
-rw-r--r--  storage/connect/domdoc.h | 1
-rw-r--r--  storage/connect/filamap.cpp | 12
-rw-r--r--  storage/connect/filamdbf.cpp | 27
-rw-r--r--  storage/connect/filamgz.cpp | 12
-rw-r--r--  storage/connect/filamgz.h | 12
-rw-r--r--  storage/connect/filamzip.cpp | 561
-rw-r--r--  storage/connect/filamzip.h | 164
-rw-r--r--  storage/connect/ha_connect.cc | 343
-rw-r--r--  storage/connect/jdbconn.cpp | 24
-rw-r--r--  storage/connect/json.cpp | 80
-rw-r--r--  storage/connect/jsonudf.cpp | 109
-rw-r--r--  storage/connect/mycat.cc | 15
-rw-r--r--  storage/connect/myconn.cpp | 20
-rw-r--r--  storage/connect/mysql-test/connect/r/xml_zip.result | 98
-rw-r--r--  storage/connect/mysql-test/connect/r/zip.result | 240
-rw-r--r--  storage/connect/mysql-test/connect/std_data/bios.json | 273
-rw-r--r--  storage/connect/mysql-test/connect/std_data/xsample2.xml | 47
-rw-r--r--  storage/connect/mysql-test/connect/t/have_zip.inc | 19
-rw-r--r--  storage/connect/mysql-test/connect/t/xml_zip.test | 41
-rw-r--r--  storage/connect/mysql-test/connect/t/zip.test | 136
-rw-r--r--  storage/connect/odbconn.cpp | 14
-rw-r--r--  storage/connect/plgdbsem.h | 26
-rw-r--r--  storage/connect/plgdbutl.cpp | 14
-rw-r--r--  storage/connect/plgxml.cpp | 4
-rw-r--r--  storage/connect/plgxml.h | 2
-rw-r--r--  storage/connect/plugutil.c | 3
-rw-r--r--  storage/connect/reldef.cpp | 6
-rw-r--r--  storage/connect/reldef.h | 18
-rw-r--r--  storage/connect/tabdos.cpp | 40
-rw-r--r--  storage/connect/tabdos.h | 8
-rw-r--r--  storage/connect/tabext.cpp | 640
-rw-r--r--  storage/connect/tabext.h | 200
-rw-r--r--  storage/connect/tabfix.cpp | 4
-rw-r--r--  storage/connect/tabfix.h | 2
-rw-r--r--  storage/connect/tabfmt.cpp | 75
-rw-r--r--  storage/connect/tabfmt.h | 5
-rw-r--r--  storage/connect/tabjdbc.cpp | 555
-rw-r--r--  storage/connect/tabjdbc.h | 129
-rw-r--r--  storage/connect/tabjson.cpp | 31
-rw-r--r--  storage/connect/tabjson.h | 4
-rw-r--r--  storage/connect/table.cpp | 249
-rw-r--r--  storage/connect/tabmac.cpp | 2
-rw-r--r--  storage/connect/tabmac.h | 2
-rw-r--r--  storage/connect/tabmul.cpp | 26
-rw-r--r--  storage/connect/tabmul.h | 18
-rw-r--r--  storage/connect/tabmysql.cpp | 183
-rw-r--r--  storage/connect/tabmysql.h | 75
-rw-r--r--  storage/connect/taboccur.cpp | 9
-rw-r--r--  storage/connect/tabodbc.cpp | 321
-rw-r--r--  storage/connect/tabodbc.h | 133
-rw-r--r--  storage/connect/tabpivot.cpp | 7
-rw-r--r--  storage/connect/tabpivot.h | 2
-rw-r--r--  storage/connect/tabsys.cpp | 8
-rw-r--r--  storage/connect/tabsys.h | 4
-rw-r--r--  storage/connect/tabtbl.cpp | 23
-rw-r--r--  storage/connect/tabutil.cpp | 23
-rw-r--r--  storage/connect/tabutil.h | 8
-rw-r--r--  storage/connect/tabvct.cpp | 6
-rw-r--r--  storage/connect/tabvct.h | 2
-rw-r--r--  storage/connect/tabvir.cpp | 2
-rw-r--r--  storage/connect/tabwmi.cpp | 21
-rw-r--r--  storage/connect/tabxcl.cpp | 8
-rw-r--r--  storage/connect/tabxcl.h | 2
-rw-r--r--  storage/connect/tabxml.cpp | 13
-rw-r--r--  storage/connect/tabxml.h | 2
-rw-r--r--  storage/connect/tabzip.cpp | 16
-rw-r--r--  storage/connect/tabzip.h | 2
-rw-r--r--  storage/connect/value.h | 2
-rwxr-xr-x  storage/connect/xindex.cpp | 8
-rw-r--r--  storage/connect/xindex.h | 4
-rw-r--r--  storage/connect/xobject.h | 3
-rw-r--r--  storage/connect/xtable.h | 211
-rw-r--r--  storage/innobase/btr/btr0cur.cc | 7
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 1
-rw-r--r--  storage/innobase/dyn/dyn0dyn.cc | 1
-rw-r--r--  storage/innobase/fsp/fsp0fsp.cc | 6
-rw-r--r--  storage/innobase/include/dict0dict.ic | 7
-rw-r--r--  storage/innobase/include/dyn0dyn.ic | 8
-rw-r--r--  storage/innobase/include/log0recv.h | 35
-rw-r--r--  storage/innobase/include/mach0data.ic | 13
-rw-r--r--  storage/innobase/include/page0page.ic | 1
-rw-r--r--  storage/innobase/log/log0log.cc | 17
-rw-r--r--  storage/innobase/log/log0recv.cc | 218
-rw-r--r--  storage/innobase/mtr/mtr0mtr.cc | 1
-rw-r--r--  storage/innobase/page/page0page.cc | 2
-rw-r--r--  storage/innobase/page/page0zip.cc | 2
-rw-r--r--  storage/innobase/row/row0merge.cc | 6
-rw-r--r--  storage/innobase/row/row0upd.cc | 4
-rw-r--r--  storage/innobase/srv/srv0start.cc | 10
-rw-r--r--  storage/innobase/sync/sync0sync.cc | 2
-rw-r--r--  storage/maria/ha_maria.cc | 234
-rw-r--r--  storage/maria/ma_check.c | 6
-rw-r--r--  storage/maria/ma_create.c | 57
-rw-r--r--  storage/maria/ma_delete_table.c | 20
-rw-r--r--  storage/maria/ma_open.c | 71
-rw-r--r--  storage/maria/ma_static.c | 6
-rw-r--r--  storage/maria/maria_chk.c | 2
-rw-r--r--  storage/maria/maria_def.h | 3
-rw-r--r--  storage/myisam/ha_myisam.cc | 223
-rw-r--r--  storage/myisam/mi_check.c | 17
-rw-r--r--  storage/myisam/mi_create.c | 53
-rw-r--r--  storage/myisam/mi_delete_table.c | 37
-rw-r--r--  storage/myisam/mi_open.c | 71
-rw-r--r--  storage/myisam/mi_static.c | 8
-rw-r--r--  storage/myisam/myisamchk.c | 2
-rw-r--r--  storage/myisam/myisamdef.h | 3
-rw-r--r--  storage/tokudb/CMakeLists.txt | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/ft-ops.cc | 23
-rw-r--r--  storage/tokudb/PerconaFT/ft/ft-ops.h | 5
-rw-r--r--  storage/tokudb/PerconaFT/ft/logger/recover.cc | 3
-rw-r--r--  storage/tokudb/PerconaFT/ft/node.cc | 18
-rw-r--r--  storage/tokudb/PerconaFT/ft/node.h | 54
-rw-r--r--  storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc | 3
-rw-r--r--  storage/tokudb/PerconaFT/ft/txn/roll.cc | 3
-rw-r--r--  storage/tokudb/PerconaFT/util/dmt.h | 5
-rw-r--r--  storage/tokudb/PerconaFT/util/omt.h | 2
-rw-r--r--  storage/tokudb/ha_tokudb.cc | 81
-rw-r--r--  storage/tokudb/ha_tokudb.h | 2
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result | 46
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result | 469
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result | 43
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test | 64
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test | 5
-rw-r--r--  storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test | 5
-rw-r--r--  storage/tokudb/mysql-test/tokudb_backup/t/suite.opt | 2
-rw-r--r--  storage/xtradb/btr/btr0btr.cc | 2
-rw-r--r--  storage/xtradb/btr/btr0cur.cc | 116
-rw-r--r--  storage/xtradb/buf/buf0buddy.cc | 1
-rw-r--r--  storage/xtradb/buf/buf0buf.cc | 9
-rw-r--r--  storage/xtradb/buf/buf0lru.cc | 142
-rw-r--r--  storage/xtradb/dict/dict0dict.cc | 1
-rw-r--r--  storage/xtradb/dict/dict0stats.cc | 13
-rw-r--r--  storage/xtradb/dyn/dyn0dyn.cc | 1
-rw-r--r--  storage/xtradb/fsp/fsp0fsp.cc | 12
-rw-r--r--  storage/xtradb/fts/fts0opt.cc | 7
-rw-r--r--  storage/xtradb/handler/ha_innodb.cc | 103
-rw-r--r--  storage/xtradb/handler/ha_innodb.h | 2
-rw-r--r--  storage/xtradb/handler/handler0alter.cc | 79
-rw-r--r--  storage/xtradb/handler/i_s.cc | 24
-rw-r--r--  storage/xtradb/include/btr0cur.h | 18
-rw-r--r--  storage/xtradb/include/btr0sea.h | 7
-rw-r--r--  storage/xtradb/include/btr0sea.ic | 4
-rw-r--r--  storage/xtradb/include/buf0buddy.ic | 2
-rw-r--r--  storage/xtradb/include/buf0buf.h | 5
-rw-r--r--  storage/xtradb/include/dict0dict.h | 19
-rw-r--r--  storage/xtradb/include/dict0dict.ic | 7
-rw-r--r--  storage/xtradb/include/dyn0dyn.h | 18
-rw-r--r--  storage/xtradb/include/dyn0dyn.ic | 10
-rw-r--r--  storage/xtradb/include/log0online.h | 20
-rw-r--r--  storage/xtradb/include/log0recv.h | 35
-rw-r--r--  storage/xtradb/include/mach0data.h | 16
-rw-r--r--  storage/xtradb/include/mach0data.ic | 13
-rw-r--r--  storage/xtradb/include/mtr0mtr.h | 5
-rw-r--r--  storage/xtradb/include/os0file.h | 5
-rw-r--r--  storage/xtradb/include/os0thread.h | 8
-rw-r--r--  storage/xtradb/include/page0page.h | 20
-rw-r--r--  storage/xtradb/include/page0page.ic | 1
-rw-r--r--  storage/xtradb/include/page0zip.h | 7
-rw-r--r--  storage/xtradb/include/rem0rec.h | 3
-rw-r--r--  storage/xtradb/include/row0upd.h | 5
-rw-r--r--  storage/xtradb/include/srv0srv.h | 9
-rw-r--r--  storage/xtradb/include/trx0trx.h | 2
-rw-r--r--  storage/xtradb/include/univ.i | 4
-rw-r--r--  storage/xtradb/log/log0log.cc | 17
-rw-r--r--  storage/xtradb/log/log0online.cc | 177
-rw-r--r--  storage/xtradb/log/log0recv.cc | 214
-rw-r--r--  storage/xtradb/mach/mach0data.cc | 44
-rw-r--r--  storage/xtradb/mtr/mtr0mtr.cc | 1
-rw-r--r--  storage/xtradb/os/os0file.cc | 53
-rw-r--r--  storage/xtradb/os/os0thread.cc | 28
-rw-r--r--  storage/xtradb/page/page0page.cc | 2
-rw-r--r--  storage/xtradb/page/page0zip.cc | 2
-rw-r--r--  storage/xtradb/rem/rem0rec.cc | 2
-rw-r--r--  storage/xtradb/row/row0merge.cc | 10
-rw-r--r--  storage/xtradb/row/row0mysql.cc | 2
-rw-r--r--  storage/xtradb/row/row0purge.cc | 2
-rw-r--r--  storage/xtradb/row/row0upd.cc | 4
-rw-r--r--  storage/xtradb/srv/srv0srv.cc | 96
-rw-r--r--  storage/xtradb/srv/srv0start.cc | 40
-rw-r--r--  storage/xtradb/sync/sync0sync.cc | 2
-rw-r--r--  storage/xtradb/trx/trx0sys.cc | 66
187 files changed, 5994 insertions, 3149 deletions
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index ce6de424421..a602084b5bd 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -22,16 +22,16 @@ fmdlex.c osutil.c plugutil.c rcmsg.c rcmsg.h
array.cpp blkfil.cpp colblk.cpp csort.cpp
filamap.cpp filamdbf.cpp filamfix.cpp filamgz.cpp filamtxt.cpp
filter.cpp json.cpp jsonudf.cpp maputil.cpp myconn.cpp myutil.cpp plgdbutl.cpp
-reldef.cpp tabcol.cpp tabdos.cpp tabfix.cpp tabfmt.cpp tabjson.cpp table.cpp
-tabmul.cpp tabmysql.cpp taboccur.cpp tabpivot.cpp tabsys.cpp tabtbl.cpp tabutil.cpp
-tabvir.cpp tabxcl.cpp valblk.cpp value.cpp xindex.cpp xobject.cpp
+reldef.cpp tabcol.cpp tabdos.cpp tabext.cpp tabfix.cpp tabfmt.cpp tabjson.cpp
+table.cpp tabmul.cpp tabmysql.cpp taboccur.cpp tabpivot.cpp tabsys.cpp tabtbl.cpp
+tabutil.cpp tabvir.cpp tabxcl.cpp valblk.cpp value.cpp xindex.cpp xobject.cpp
array.h blkfil.h block.h catalog.h checklvl.h colblk.h connect.h csort.h
engmsg.h filamap.h filamdbf.h filamfix.h filamgz.h filamtxt.h
filter.h global.h ha_connect.h inihandl.h json.h jsonudf.h maputil.h msgid.h
mycat.h myconn.h myutil.h os.h osutil.h plgcnx.h plgdbsem.h preparse.h reldef.h
-resource.h tabcol.h tabdos.h tabfix.h tabfmt.h tabjson.h tabmul.h tabmysql.h
-taboccur.h tabpivot.h tabsys.h tabtbl.h tabutil.h tabvir.h tabxcl.h
+resource.h tabcol.h tabdos.h tabext.h tabfix.h tabfmt.h tabjson.h tabmul.h
+tabmysql.h taboccur.h tabpivot.h tabsys.h tabtbl.h tabutil.h tabvir.h tabxcl.h
user_connect.h valblk.h value.h xindex.h xobject.h xtable.h)
#
diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp
index 193514eeb99..1998ab890e9 100644
--- a/storage/connect/array.cpp
+++ b/storage/connect/array.cpp
@@ -1,7 +1,7 @@
/************* Array C++ Functions Source Code File (.CPP) *************/
/* Name: ARRAY.CPP Version 2.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* This file contains the XOBJECT derived class ARRAY functions. */
/* ARRAY is used for elaborate type of processing, such as sorting */
@@ -141,7 +141,7 @@ PARRAY MakeValueArray(PGLOBAL g, PPARM pp)
/* ARRAY public constructor. */
/***********************************************************************/
ARRAY::ARRAY(PGLOBAL g, int type, int size, int length, int prec)
- : CSORT(FALSE)
+ : CSORT(false)
{
Nval = 0;
Ndif = 0;
@@ -188,14 +188,14 @@ ARRAY::ARRAY(PGLOBAL g, int type, int size, int length, int prec)
else if (type != TYPE_PCHAR)
Value = AllocateValue(g, type, Len, prec);
- Constant = TRUE;
+ Constant = true;
} // end of ARRAY constructor
#if 0
/***********************************************************************/
/* ARRAY public constructor from a QUERY. */
/***********************************************************************/
-ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(FALSE)
+ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(false)
{
Type = qryp->GetColType(0);
Nval = qryp->GetNblin();
@@ -206,7 +206,7 @@ ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(FALSE)
Xsize = -1;
Len = qryp->GetColLength(0);
X = Inf = Sup = 0;
- Correlated = FALSE;
+ Correlated = false;
switch (Type) {
case TYPE_STRING:
@@ -229,13 +229,13 @@ ARRAY::ARRAY(PGLOBAL g, PQUERY qryp) : CSORT(FALSE)
// The error message was built by ???
Type = TYPE_ERROR;
- Constant = TRUE;
+ Constant = true;
} // end of ARRAY constructor
/***********************************************************************/
/* ARRAY constructor from a TYPE_LIST subarray. */
/***********************************************************************/
-ARRAY::ARRAY(PGLOBAL g, PARRAY par, int k) : CSORT(FALSE)
+ARRAY::ARRAY(PGLOBAL g, PARRAY par, int k) : CSORT(false)
{
int prec;
LSTBLK *lp;
@@ -260,7 +260,7 @@ ARRAY::ARRAY(PGLOBAL g, PARRAY par, int k) : CSORT(FALSE)
Len = (Type == TYPE_STRING) ? Vblp->GetVlen() : 0;
prec = (Type == TYPE_FLOAT) ? 2 : 0;
Value = AllocateValue(g, Type, Len, prec, NULL);
- Constant = TRUE;
+ Constant = true;
} // end of ARRAY constructor
/***********************************************************************/
@@ -283,7 +283,7 @@ bool ARRAY::AddValue(PGLOBAL g, PSZ strp)
{
if (Type != TYPE_STRING) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "CHAR");
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -292,7 +292,7 @@ bool ARRAY::AddValue(PGLOBAL g, PSZ strp)
//Value->SetValue_psz(strp);
//Vblp->SetValue(valp, Nval++);
Vblp->SetValue(strp, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -302,14 +302,14 @@ bool ARRAY::AddValue(PGLOBAL g, void *p)
{
if (Type != TYPE_PCHAR) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "PCHAR");
- return TRUE;
+ return true;
} // endif Type
if (trace)
htrc(" adding pointer(%d): %p\n", Nval, p);
Vblp->SetValue((PSZ)p, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -319,7 +319,7 @@ bool ARRAY::AddValue(PGLOBAL g, short n)
{
if (Type != TYPE_SHORT) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "SHORT");
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -328,7 +328,7 @@ bool ARRAY::AddValue(PGLOBAL g, short n)
//Value->SetValue(n);
//Vblp->SetValue(valp, Nval++);
Vblp->SetValue(n, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -338,7 +338,7 @@ bool ARRAY::AddValue(PGLOBAL g, int n)
{
if (Type != TYPE_INT) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "INTEGER");
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -347,7 +347,7 @@ bool ARRAY::AddValue(PGLOBAL g, int n)
//Value->SetValue(n);
//Vblp->SetValue(valp, Nval++);
Vblp->SetValue(n, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -357,7 +357,7 @@ bool ARRAY::AddValue(PGLOBAL g, double d)
{
if (Type != TYPE_DOUBLE) {
sprintf(g->Message, MSG(ADD_BAD_TYPE), GetTypeName(Type), "DOUBLE");
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -365,7 +365,7 @@ bool ARRAY::AddValue(PGLOBAL g, double d)
Value->SetValue(d);
Vblp->SetValue(Value, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -376,7 +376,7 @@ bool ARRAY::AddValue(PGLOBAL g, PXOB xp)
if (Type != xp->GetResultType()) {
sprintf(g->Message, MSG(ADD_BAD_TYPE),
GetTypeName(xp->GetResultType()), GetTypeName(Type));
- return TRUE;
+ return true;
} // endif Type
if (trace)
@@ -384,7 +384,7 @@ bool ARRAY::AddValue(PGLOBAL g, PXOB xp)
//AddValue(xp->GetValue());
Vblp->SetValue(xp->GetValue(), Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -395,14 +395,14 @@ bool ARRAY::AddValue(PGLOBAL g, PVAL vp)
if (Type != vp->GetType()) {
sprintf(g->Message, MSG(ADD_BAD_TYPE),
GetTypeName(vp->GetType()), GetTypeName(Type));
- return TRUE;
+ return true;
} // endif Type
if (trace)
htrc(" adding (%d) from vp=%p\n", Nval, vp);
Vblp->SetValue(vp, Nval++);
- return FALSE;
+ return false;
} // end of AddValue
/***********************************************************************/
@@ -423,12 +423,12 @@ bool ARRAY::GetSubValue(PGLOBAL g, PVAL valp, int *kp)
if (Type != TYPE_LIST) {
sprintf(g->Message, MSG(NO_SUB_VAL), Type);
- return TRUE;
+ return true;
} // endif Type
vblp = ((LSTBLK*)Vblp)->Mbvk[kp[0]]->Vblk;
valp->SetValue_pvblk(vblp, kp[1]);
- return FALSE;
+ return false;
} // end of GetSubValue
#endif // 0
@@ -476,11 +476,11 @@ bool ARRAY::Find(PVAL valp)
else if (n > 0)
Inf = X;
else
- return TRUE;
+ return true;
} // endwhile
- return FALSE;
+ return false;
} // end of Find
/***********************************************************************/
@@ -504,9 +504,9 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm)
int top = Nval - 1;
if (top < 0) // Array is empty
- // Return TRUE for ALL because it means that there are no item that
+ // Return true for ALL because it means that there are no item that
// does not verify the condition, which is true indeed.
- // Return FALSE for ANY because TRUE means that there is at least
+ // Return false for ANY because true means that there is at least
// one item that verifies the condition, which is false.
return opm == 2;
@@ -528,9 +528,9 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm)
else if (opc == OP_NE && opm == 2)
return !Find(vp);
else if (opc == OP_EQ && opm == 2)
- return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : FALSE;
+ return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : false;
else if (opc == OP_NE && opm == 1)
- return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : TRUE;
+ return (Ndif == 1) ? !(Vcompare(vp, 0) & bt) : true;
if (Type != TYPE_LIST) {
if (opc == OP_GT || opc == OP_GE)
@@ -544,15 +544,15 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm)
if (opm == 2) {
for (i = 0; i < Nval; i++)
if (Vcompare(vp, i) & bt)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
} else { // opm == 1
for (i = 0; i < Nval; i++)
if (!(Vcompare(vp, i) & bt))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
} // endif opm
} // end of FilTest
@@ -566,7 +566,7 @@ bool ARRAY::CanBeShort(void)
int* To_Val = (int*)Valblk->GetMemp();
if (Type != TYPE_INT || !Ndif)
- return FALSE;
+ return false;
// Because the array is sorted, this is true if all the array
// int values are in the range of SHORT values
@@ -582,7 +582,7 @@ bool ARRAY::CanBeShort(void)
int ARRAY::Convert(PGLOBAL g, int k, PVAL vp)
{
int i, prec = 0;
- bool b = FALSE;
+ bool b = false;
PMBV ovblk = Valblk;
PVBLK ovblp = Vblp;
@@ -619,7 +619,7 @@ int ARRAY::Convert(PGLOBAL g, int k, PVAL vp)
if (((DTVAL*)Value)->SetFormat(g, vp))
return TYPE_ERROR;
else
- b = TRUE; // Sort the new array on date internal values
+ b = true; // Sort the new array on date internal values
/*********************************************************************/
/* Do the actual conversion. */
@@ -706,7 +706,7 @@ void ARRAY::SetPrecision(PGLOBAL g, int p)
/***********************************************************************/
/* Sort and eliminate distinct values from an array. */
/* Note: this is done by making a sorted index on distinct values. */
-/* Returns FALSE if Ok or TRUE in case of error. */
+/* Returns false if Ok or true in case of error. */
/***********************************************************************/
bool ARRAY::Sort(PGLOBAL g)
{
@@ -789,14 +789,14 @@ bool ARRAY::Sort(PGLOBAL g)
Bot = -1; // For non optimized search
Top = Ndif; // Find searches the whole array.
- return FALSE;
+ return false;
error:
Nval = Ndif = 0;
Valblk->Free();
PlgDBfree(Index);
PlgDBfree(Offset);
- return TRUE;
+ return true;
} // end of Sort
/***********************************************************************/
@@ -839,9 +839,9 @@ void *ARRAY::GetSortIndex(PGLOBAL g)
/***********************************************************************/
/* Block filter testing for IN operator on Column/Array operands. */
-/* Here we call Find that returns TRUE if the value is in the array */
+/* Here we call Find that returns true if the value is in the array */
/* with X equal to the index of the found value in the array, or */
-/* FALSE if the value is not in the array with Inf and Sup being the */
+/* false if the value is not in the array with Inf and Sup being the */
/* indexes of the array values that are immediately below and over */
/* the not found value. This enables to restrict the array to the */
/* values that are between the min and max block values and to return */
@@ -854,9 +854,9 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
bool bin, bax, pin, pax, veq, all = (opm == 2);
if (Ndif == 0) // Array is empty
- // Return TRUE for ALL because it means that there are no item that
+ // Return true for ALL because it means that there are no item that
// does not verify the condition, which is true indeed.
- // Return FALSE for ANY because TRUE means that there is at least
+ // Return false for ANY because true means that there is at least
// one item that verifies the condition, which is false.
return (all) ? 2 : -2;
else if (opc == OP_EQ && all && Ndif > 1)
@@ -864,7 +864,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
else if (opc == OP_NE && !all && Ndif > 1)
return 2;
// else if (Ndif == 1)
-// all = FALSE;
+// all = false;
// veq is true when all values in the block are equal
switch (Type) {
@@ -874,7 +874,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
case TYPE_SHORT: veq = *(short*)minp == *(short*)maxp; break;
case TYPE_INT: veq = *(int*)minp == *(int*)maxp; break;
case TYPE_DOUBLE: veq = *(double*)minp == *(double*)maxp; break;
- default: veq = FALSE; // Error ?
+ default: veq = false; // Error ?
} // endswitch type
if (!s)
@@ -898,7 +898,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
case OP_GT: return -1; break;
} // endswitch opc
- pax = (opc == OP_GE) ? (X < Ndif - 1) : TRUE;
+ pax = (opc == OP_GE) ? (X < Ndif - 1) : true;
} else if (Inf == Bot) {
// Max value is smaller than min list value
return (opc == OP_LT || opc == OP_LE || opc == OP_NE) ? 1 : -1;
@@ -924,7 +924,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
case OP_LT: return (s) ? -2 : -1; break;
} // endswitch opc
- pin = (opc == OP_LE) ? (X > 0) : TRUE;
+ pin = (opc == OP_LE) ? (X > 0) : true;
} else if (Sup == Ndif) {
// Min value is greater than max list value
if (opc == OP_GT || opc == OP_GE || opc == OP_NE)
@@ -956,7 +956,7 @@ int ARRAY::BlockTest(PGLOBAL, int opc, int opm,
// the only possible overlaps between the array and the block are:
// Array: +-------+ +-------+ +-------+ +-----+
// Block: +-----+ +---+ +------+ +--------+
- // TRUE: pax pin pax pin
+ // true: pax pin pax pin
if (all) switch (opc) {
case OP_GT:
case OP_GE: return (pax) ? -1 : 0; break;
@@ -1052,7 +1052,7 @@ void ARRAY::Print(PGLOBAL, char *ps, uint z)
/***********************************************************************/
/* MULAR public constructor. */
/***********************************************************************/
-MULAR::MULAR(PGLOBAL g, int n) : CSORT(FALSE)
+MULAR::MULAR(PGLOBAL g, int n) : CSORT(false)
{
Narray = n;
Pars = (PARRAY*)PlugSubAlloc(g, NULL, n * sizeof(PARRAY));
@@ -1075,7 +1075,7 @@ int MULAR::Qcompare(int *i1, int *i2)
/***********************************************************************/
/* Sort and eliminate distinct values from multiple arrays. */
/* Note: this is done by making a sorted index on distinct values. */
-/* Returns FALSE if Ok or TRUE in case of error. */
+/* Returns false if Ok or true in case of error. */
/***********************************************************************/
bool MULAR::Sort(PGLOBAL g)
{
@@ -1087,7 +1087,7 @@ bool MULAR::Sort(PGLOBAL g)
for (n = 1; n < Narray; n++)
if (Pars[n]->Nval != nval) {
strcpy(g->Message, MSG(BAD_ARRAY_VAL));
- return TRUE;
+ return true;
} // endif nval
// Prepare non conservative sort with offet values
@@ -1161,10 +1161,10 @@ bool MULAR::Sort(PGLOBAL g)
Pars[n]->Top = ndif; // Find searches the whole array.
} // endfor n
- return FALSE;
+ return false;
error:
PlgDBfree(Index);
PlgDBfree(Offset);
- return TRUE;
+ return true;
} // end of Sort
diff --git a/storage/connect/array.h b/storage/connect/array.h
index 6fb38ae6b47..dfc3638de8a 100644
--- a/storage/connect/array.h
+++ b/storage/connect/array.h
@@ -1,7 +1,7 @@
/**************** Array H Declares Source Code File (.H) ***************/
/* Name: ARRAY.H Version 3.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* This file contains the ARRAY and VALBASE derived classes declares. */
/***********************************************************************/
@@ -53,8 +53,8 @@ class DllExport ARRAY : public XOBJECT, public CSORT { // Array descblock
using XOBJECT::GetIntValue;
virtual void Reset(void) {Bot = -1;}
virtual int Qcompare(int *, int *);
- virtual bool Compare(PXOB) {assert(FALSE); return FALSE;}
- virtual bool SetFormat(PGLOBAL, FORMAT&) {assert(FALSE); return FALSE;}
+ virtual bool Compare(PXOB) {assert(false); return false;}
+ virtual bool SetFormat(PGLOBAL, FORMAT&) {assert(false); return false;}
//virtual int CheckSpcCol(PTDB, int) {return 0;}
virtual void Print(PGLOBAL g, FILE *f, uint n);
virtual void Print(PGLOBAL g, char *ps, uint z);
diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp
index 80b405be041..58841387249 100644
--- a/storage/connect/colblk.cpp
+++ b/storage/connect/colblk.cpp
@@ -1,7 +1,7 @@
/************* Colblk C++ Functions Source Code File (.CPP) ************/
-/* Name: COLBLK.CPP Version 2.1 */
+/* Name: COLBLK.CPP Version 2.2 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* This file contains the COLBLK class functions. */
/***********************************************************************/
@@ -300,7 +300,7 @@ FIDBLK::FIDBLK(PCOLUMN cp, OPVAL op) : SPCBLK(cp), Op(op)
#if defined(__WIN__)
Format.Prec = 1; // Case insensitive
#endif // __WIN__
- Constant = (!((PTDBASE)To_Tdb)->GetDef()->GetMultiple() &&
+ Constant = (!To_Tdb->GetDef()->GetMultiple() &&
To_Tdb->GetAmType() != TYPE_AM_PLG &&
To_Tdb->GetAmType() != TYPE_AM_PLM);
Fn = NULL;
@@ -312,11 +312,11 @@ FIDBLK::FIDBLK(PCOLUMN cp, OPVAL op) : SPCBLK(cp), Op(op)
/***********************************************************************/
void FIDBLK::ReadColumn(PGLOBAL g)
{
- if (Fn != ((PTDBASE)To_Tdb)->GetFile(g)) {
+ if (Fn != To_Tdb->GetFile(g)) {
char filename[_MAX_PATH];
- Fn = ((PTDBASE)To_Tdb)->GetFile(g);
- PlugSetPath(filename, Fn, ((PTDBASE)To_Tdb)->GetPath());
+ Fn = To_Tdb->GetFile(g);
+ PlugSetPath(filename, Fn, To_Tdb->GetPath());
if (Op != OP_XX) {
char buff[_MAX_PATH];
@@ -378,10 +378,8 @@ void PRTBLK::ReadColumn(PGLOBAL g)
{
if (Pname == NULL) {
char *p;
- PTDBASE tdbp = (PTDBASE)To_Tdb;
-
- Pname = tdbp->GetDef()->GetStringCatInfo(g, "partname", "?");
+ Pname = To_Tdb->GetDef()->GetStringCatInfo(g, "partname", "?");
p = strrchr(Pname, '#');
Value->SetValue_psz((p) ? p + 1 : Pname);
} // endif Pname
diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc
index 460d47bcf62..a17c5dafa43 100644
--- a/storage/connect/connect.cc
+++ b/storage/connect/connect.cc
@@ -157,23 +157,22 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname)
/* Returns valid: true if this is a table info. */
/***********************************************************************/
bool CntInfo(PGLOBAL g, PTDB tp, PXF info)
- {
- bool b;
- PTDBDOS tdbp= (PTDBDOS)tp;
+{
+ if (tp) {
+ bool b = (tp->GetFtype() == RECFM_NAF);
+ PTDBDOS tdbp = b ? NULL : (PTDBDOS)tp;
- if (tdbp) {
- b= tdbp->GetFtype() != RECFM_NAF;
- info->data_file_length= (b) ? (ulonglong)tdbp->GetFileLength(g) : 0;
+ info->data_file_length = (b) ? 0 : (ulonglong)tdbp->GetFileLength(g);
- if (!b || info->data_file_length)
- info->records= (unsigned)tdbp->Cardinality(g);
-// info->records= (unsigned)tdbp->GetMaxSize(g);
+ if (b || info->data_file_length)
+ info->records= (unsigned)tp->Cardinality(g);
+// info->records= (unsigned)tp->GetMaxSize(g);
else
info->records= 0;
// info->mean_rec_length= tdbp->GetLrecl();
info->mean_rec_length= 0;
- info->data_file_name= (b) ? tdbp->GetFile(g) : NULL;
+ info->data_file_name= (b) ? NULL : tdbp->GetFile(g);
return true;
} else {
info->data_file_length= 0;
@@ -183,7 +182,7 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info)
return false;
} // endif tdbp
- } // end of CntInfo
+} // end of CntInfo
/***********************************************************************/
/* GetTDB: Get the table description block of a CONNECT table. */
@@ -332,9 +331,9 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
} // endfor colp
// Attach the updated columns list to the main table
- ((PTDBASE)tdbp)->SetSetCols(utp->GetColumns());
+ tdbp->SetSetCols(utp->GetColumns());
} else if (tdbp && mode == MODE_INSERT)
- ((PTDBASE)tdbp)->SetSetCols(tdbp->GetColumns());
+ tdbp->SetSetCols(tdbp->GetColumns());
// Now do open the physical table
if (trace)
@@ -343,7 +342,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
//tdbp->SetMode(mode);
- if (del/* && ((PTDBASE)tdbp)->GetFtype() != RECFM_NAF*/) {
+ if (del/* && (tdbp->GetFtype() != RECFM_NAF*/) {
// To avoid erasing the table when doing a partial delete
// make a fake Next
// PDOSDEF ddp= new(g) DOSDEF;
@@ -436,7 +435,7 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp)
if (!tdbp)
return RC_FX;
- else if (((PTDBASE)tdbp)->GetKindex()) {
+ else if (tdbp->GetKindex()) {
// Reading sequencially an indexed table. This happens after the
// handler function records_in_range was called and MySQL decides
// to quit using the index (!!!) Drop the index.
@@ -483,7 +482,7 @@ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp)
{
RCODE rc;
PCOL colp;
- PTDBASE tp= (PTDBASE)tdbp;
+//PTDBASE tp= (PTDBASE)tdbp;
if (!tdbp)
return RC_FX;
@@ -501,13 +500,13 @@ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp)
} // endif rc
// Store column values in table write buffer(s)
- for (colp= tp->GetSetCols(); colp; colp= colp->GetNext())
+ for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext())
if (!colp->GetColUse(U_VIRTUAL))
colp->WriteColumn(g);
- if (tp->IsIndexed())
+ if (tdbp->IsIndexed())
// Index values must be sorted before updating
- rc= (RCODE)((PTDBDOS)tp)->GetTxfp()->StoreValues(g, true);
+ rc= (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, true);
else
// Return result code from write operation
rc= (RCODE)tdbp->WriteDB(g);
@@ -535,7 +534,7 @@ RCODE CntUpdateRow(PGLOBAL g, PTDB tdbp)
RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all)
{
RCODE rc;
- PTDBASE tp= (PTDBASE)tdbp;
+//PTDBASE tp= (PTDBASE)tdbp;
if (!tdbp || tdbp->GetMode() != MODE_DELETE)
return RC_FX;
@@ -543,16 +542,16 @@ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all)
return RC_NF;
if (all) {
- if (((PTDBASE)tdbp)->GetDef()->Indexable())
+ if (tdbp->GetDef()->Indexable())
((PTDBDOS)tdbp)->Cardinal= 0;
// Note: if all, this call will be done when closing the table
rc= (RCODE)tdbp->DeleteDB(g, RC_FX);
-//} else if (tp->GetKindex() && !tp->GetKindex()->IsSorted() &&
-// tp->Txfp->GetAmType() != TYPE_AM_DBF) {
- } else if(tp->IsIndexed()) {
+//} else if (tdbp->GetKindex() && !((PTDBASE)tdbp)->GetKindex()->IsSorted() &&
+// ((PTDBASE)tdbp)->Txfp->GetAmType() != TYPE_AM_DBF) {
+ } else if(tdbp->IsIndexed()) {
// Index values must be sorted before updating
- rc= (RCODE)((PTDBDOS)tp)->GetTxfp()->StoreValues(g, false);
+ rc= (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, false);
} else // Return result code from delete operation
rc= (RCODE)tdbp->DeleteDB(g, RC_OK);
@@ -565,7 +564,7 @@ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all)
int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
{
int rc= RC_OK;
- TDBASE *tbxp= (PTDBASE)tdbp;
+//TDBASE *tbxp= (PTDBASE)tdbp;
if (!tdbp)
return rc; // Nothing to do
@@ -581,13 +580,13 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
tdbp, tdbp->GetMode(), nox, abort);
if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) {
- if (tbxp->IsIndexed())
+ if (tdbp->IsIndexed())
rc= ((PTDBDOS)tdbp)->GetTxfp()->DeleteSortedRows(g);
if (!rc)
rc= tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine
- } else if (tbxp->GetMode() == MODE_UPDATE && tbxp->IsIndexed())
+ } else if (tdbp->GetMode() == MODE_UPDATE && tdbp->IsIndexed())
rc= ((PTDBDOX)tdbp)->Txfp->UpdateSortedRows(g);
switch(rc) {
@@ -595,7 +594,7 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
abort= true;
break;
case RC_INFO:
- PushWarning(g, tbxp);
+ PushWarning(g, tdbp);
break;
} // endswitch rc
@@ -631,11 +630,13 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort)
if (trace > 1)
printf("About to reset opt\n");
- // Make all the eventual indexes
- tbxp= (TDBDOX*)tdbp;
- tbxp->ResetKindex(g, NULL);
- tbxp->SetKey_Col(NULL);
- rc= tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1);
+ if (!tdbp->IsRemote()) {
+ // Make all the eventual indexes
+ PTDBDOX tbxp = (PTDBDOX)tdbp;
+ tbxp->ResetKindex(g, NULL);
+ tbxp->SetKey_Col(NULL);
+ rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1);
+ } // endif remote
err:
if (trace > 1)
@@ -657,10 +658,10 @@ int CntIndexInit(PGLOBAL g, PTDB ptdb, int id, bool sorted)
if (!ptdb)
return -1;
- else if (!((PTDBASE)ptdb)->GetDef()->Indexable()) {
+ else if (!ptdb->GetDef()->Indexable()) {
sprintf(g->Message, MSG(TABLE_NO_INDEX), ptdb->GetName());
return 0;
- } else if (((PTDBASE)ptdb)->GetDef()->Indexable() == 3) {
+ } else if (ptdb->GetDef()->Indexable() == 3) {
return 1;
} else
tdbp= (PTDBDOX)ptdb;
@@ -745,7 +746,7 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op,
if (!ptdb)
return RC_FX;
else
- x= ((PTDBASE)ptdb)->GetDef()->Indexable();
+ x= ptdb->GetDef()->Indexable();
if (!x) {
sprintf(g->Message, MSG(TABLE_NO_INDEX), ptdb->GetName());
@@ -875,7 +876,7 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len,
if (!ptdb)
return -1;
- x= ((PTDBASE)ptdb)->GetDef()->Indexable();
+ x= ptdb->GetDef()->Indexable();
if (!x) {
sprintf(g->Message, MSG(TABLE_NO_INDEX), ptdb->GetName());
diff --git a/storage/connect/domdoc.cpp b/storage/connect/domdoc.cpp
index eb9660b439d..1622ec16c68 100644
--- a/storage/connect/domdoc.cpp
+++ b/storage/connect/domdoc.cpp
@@ -116,7 +116,9 @@ bool DOMDOC::ParseFile(PGLOBAL g, char *fn)
// Parse an in memory document
char *xdoc = GetMemDoc(g, fn);
- b = (xdoc) ? (bool)Docp->loadXML((_bstr_t)xdoc) : false;
+ // This is not equivalent to load for UTF8 characters
+ // It is why get node content is not the same
+ b = (xdoc) ? (bool)Docp->loadXML((_bstr_t)xdoc) : false;
} else
// Load the document
b = (bool)Docp->load((_bstr_t)fn);
@@ -266,6 +268,7 @@ DOMNODE::DOMNODE(PXDOC dp, MSXML2::IXMLDOMNodePtr np) : XMLNODE(dp)
Nodep = np;
Ws = NULL;
Len = 0;
+ Zip = (bool)dp->zip;
} // end of DOMNODE constructor
/******************************************************************/
@@ -316,8 +319,10 @@ RCODE DOMNODE::GetContent(PGLOBAL g, char *buf, int len)
RCODE rc = RC_OK;
// Nodep can be null for a missing HTML table column
- if (Nodep) {
- if (!WideCharToMultiByte(CP_UTF8, 0, Nodep->text, -1,
+ if (Nodep) {
+ if (Zip) {
+ strcpy(buf, Nodep->text);
+ } else if (!WideCharToMultiByte(CP_UTF8, 0, Nodep->text, -1,
buf, len, NULL, NULL)) {
DWORD lsr = GetLastError();
diff --git a/storage/connect/domdoc.h b/storage/connect/domdoc.h
index cfec98a9422..7f269002d59 100644
--- a/storage/connect/domdoc.h
+++ b/storage/connect/domdoc.h
@@ -93,6 +93,7 @@ class DOMNODE : public XMLNODE {
char Name[64];
WCHAR *Ws;
int Len;
+ bool Zip;
}; // end of class DOMNODE
/******************************************************************/
diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp
index 94c562a9981..8fffaca3d06 100644
--- a/storage/connect/filamap.cpp
+++ b/storage/connect/filamap.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -45,6 +45,7 @@
#include "maputil.h"
#include "filamap.h"
#include "tabdos.h"
+#include "tabfmt.h"
/* --------------------------- Class MAPFAM -------------------------- */
@@ -322,17 +323,20 @@ int MAPFAM::ReadBuffer(PGLOBAL g)
int rc, len;
// Are we at the end of the memory
- if (Mempos >= Top)
+ if (Mempos >= Top) {
if ((rc = GetNext(g)) != RC_OK)
return rc;
+ else if (Tdbp->GetAmType() == TYPE_AM_CSV && ((PTDBCSV)Tdbp)->Header)
+ if ((rc = SkipRecord(g, true)) != RC_OK)
+ return rc;
+
+ } // endif Mempos
if (!Placed) {
/*******************************************************************/
/* Record file position in case of UPDATE or DELETE. */
/*******************************************************************/
- int rc;
-
next:
Fpos = Mempos;
CurBlk = (int)Rows++;
diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp
index a4557facbd8..9feb61d7d61 100644
--- a/storage/connect/filamdbf.cpp
+++ b/storage/connect/filamdbf.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -281,15 +281,25 @@ PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info)
/************************************************************************/
switch (thisfield.Type) {
case 'C': // Characters
- case 'L': // Logical 'T' or 'F'
- type = TYPE_STRING;
+ case 'L': // Logical 'T' or 'F' or space
+ type = TYPE_STRING;
+ break;
+ case 'M': // Memo a .DBT block number
+ case 'B': // Binary a .DBT block number
+ case 'G': // Ole a .DBT block number
+ type = TYPE_STRING;
break;
+ //case 'I': // Long
+ //case '+': // Autoincrement
+ // type = TYPE_INT;
+ // break;
case 'N':
type = (thisfield.Decimals) ? TYPE_DOUBLE
: (len > 10) ? TYPE_BIGINT : TYPE_INT;
break;
- case 'F':
- type = TYPE_DOUBLE;
+ case 'F': // Float
+ //case 'O': // Double
+ type = TYPE_DOUBLE;
break;
case 'D':
type = TYPE_DATE; // Is this correct ???
@@ -441,6 +451,7 @@ int DBFFAM::Cardinality(PGLOBAL g)
if (Accept) {
Lrecl = rln;
+ Blksize = Nrec * rln;
PushWarning(g, Tdbp);
} else
return -1;
@@ -582,6 +593,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
if (Accept) {
Lrecl = reclen;
+ Blksize = Nrec * Lrecl;
PushWarning(g, Tdbp);
} else
return true;
@@ -598,7 +610,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
header->Filedate[1] = datm->tm_mon + 1;
header->Filedate[2] = datm->tm_mday;
header->SetHeadlen((ushort)hlen);
- header->SetReclen((ushort)reclen);
+ header->SetReclen(reclen);
descp = (DESCRIPTOR*)header;
// Currently only standard Xbase types are supported
@@ -664,6 +676,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g)
if (Accept) {
Lrecl = header.Reclen();
+ Blksize = Nrec * Lrecl;
PushWarning(g, Tdbp);
} else
return true;
@@ -956,6 +969,7 @@ int DBMFAM::Cardinality(PGLOBAL g)
if (Accept) {
Lrecl = rln;
+ Blksize = Nrec * Lrecl;
PushWarning(g, Tdbp);
} else
return -1;
@@ -1008,6 +1022,7 @@ bool DBMFAM::AllocateBuffer(PGLOBAL g)
if (Accept) {
Lrecl = hp->Reclen();
+ Blksize = Nrec * Lrecl;
PushWarning(g, Tdbp);
} else
return true;
diff --git a/storage/connect/filamgz.cpp b/storage/connect/filamgz.cpp
index 07242ea633c..dc6f277ee27 100644
--- a/storage/connect/filamgz.cpp
+++ b/storage/connect/filamgz.cpp
@@ -724,20 +724,20 @@ void ZBKFAM::Rewind(void)
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-ZIXFAM::ZIXFAM(PDOSDEF tdp) : ZBKFAM(tdp)
+GZXFAM::GZXFAM(PDOSDEF tdp) : ZBKFAM(tdp)
{
//Block = tdp->GetBlock();
//Last = tdp->GetLast();
Nrec = (tdp->GetElemt()) ? tdp->GetElemt() : DOS_BUFF_LEN;
Blksize = Nrec * Lrecl;
- } // end of ZIXFAM standard constructor
+ } // end of GZXFAM standard constructor
/***********************************************************************/
/* ZIX Cardinality: returns table cardinality in number of rows. */
/* This function can be called with a null argument to test the */
/* availability of Cardinality implementation (1 yes, 0 no). */
/***********************************************************************/
-int ZIXFAM::Cardinality(PGLOBAL g)
+int GZXFAM::Cardinality(PGLOBAL g)
{
if (Last)
return (g) ? (int)((Block - 1) * Nrec + Last) : 1;
@@ -750,7 +750,7 @@ int ZIXFAM::Cardinality(PGLOBAL g)
/* Allocate the line buffer. For mode Delete a bigger buffer has to */
/* be allocated because is it also used to move lines into the file. */
/***********************************************************************/
-bool ZIXFAM::AllocateBuffer(PGLOBAL g)
+bool GZXFAM::AllocateBuffer(PGLOBAL g)
{
Buflen = Blksize;
To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen);
@@ -788,7 +788,7 @@ bool ZIXFAM::AllocateBuffer(PGLOBAL g)
/***********************************************************************/
/* ReadBuffer: Read one line from a compressed text file. */
/***********************************************************************/
-int ZIXFAM::ReadBuffer(PGLOBAL g)
+int GZXFAM::ReadBuffer(PGLOBAL g)
{
int n, rc = RC_OK;
@@ -850,7 +850,7 @@ int ZIXFAM::ReadBuffer(PGLOBAL g)
/* WriteDB: Data Base write routine for ZDOS access method. */
/* Update is not possible without using a temporary file (NIY). */
/***********************************************************************/
-int ZIXFAM::WriteBuffer(PGLOBAL g)
+int GZXFAM::WriteBuffer(PGLOBAL g)
{
/*********************************************************************/
/* In Insert mode, blocs are added sequentialy to the file end. */
diff --git a/storage/connect/filamgz.h b/storage/connect/filamgz.h
index d667fdddcc2..7a00c0d4bc7 100644
--- a/storage/connect/filamgz.h
+++ b/storage/connect/filamgz.h
@@ -12,7 +12,7 @@
typedef class GZFAM *PGZFAM;
typedef class ZBKFAM *PZBKFAM;
-typedef class ZIXFAM *PZIXFAM;
+typedef class GZXFAM *PZIXFAM;
typedef class ZLBFAM *PZLBFAM;
/***********************************************************************/
@@ -101,16 +101,16 @@ class DllExport ZBKFAM : public GZFAM {
/* length files compressed using the gzip library functions. */
/* The file is always accessed by block. */
/***********************************************************************/
-class DllExport ZIXFAM : public ZBKFAM {
+class DllExport GZXFAM : public ZBKFAM {
public:
// Constructor
- ZIXFAM(PDOSDEF tdp);
- ZIXFAM(PZIXFAM txfp) : ZBKFAM(txfp) {}
+ GZXFAM(PDOSDEF tdp);
+ GZXFAM(PZIXFAM txfp) : ZBKFAM(txfp) {}
// Implementation
virtual int GetNextPos(void) {return 0;}
virtual PTXF Duplicate(PGLOBAL g)
- {return (PTXF)new(g) ZIXFAM(this);}
+ {return (PTXF)new(g) GZXFAM(this);}
// Methods
virtual int Cardinality(PGLOBAL g);
@@ -120,7 +120,7 @@ class DllExport ZIXFAM : public ZBKFAM {
protected:
// No additional Members
- }; // end of class ZIXFAM
+ }; // end of class GZXFAM
/***********************************************************************/
/* This is the DOS/UNIX Access Method class declaration for PlugDB */
diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp
index 6aca4631f32..3d157da5e87 100644
--- a/storage/connect/filamzip.cpp
+++ b/storage/connect/filamzip.cpp
@@ -1,11 +1,11 @@
/*********** File AM Zip C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: FILAMZIP */
/* ------------- */
-/* Version 1.0 */
+/* Version 1.1 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -19,13 +19,16 @@
#include "my_global.h"
#if !defined(__WIN__)
#if defined(UNIX)
+#include <fnmatch.h>
#include <errno.h>
+#include <dirent.h>
#include <unistd.h>
#else // !UNIX
#include <io.h>
#endif // !UNIX
#include <fcntl.h>
#endif // !__WIN__
+#include <time.h>
/***********************************************************************/
/* Include application header files: */
@@ -40,12 +43,346 @@
//#include "tabzip.h"
#include "filamzip.h"
+#define WRITEBUFFERSIZE (16384)
+
+bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul);
+
+/***********************************************************************/
+/* Compress a file in zip when creating a table. */
+/***********************************************************************/
+static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, char *fn, char *entry, char *buf)
+{
+ int rc = RC_OK, size_read, size_buf = WRITEBUFFERSIZE;
+ FILE *fin;
+
+ if (zutp->addEntry(g, entry))
+ return true;
+ else if (!(fin = fopen(fn, "rb"))) {
+ sprintf(g->Message, "error in opening %s for reading", fn);
+ return true;
+ } // endif fin
+
+ do {
+ size_read = (int)fread(buf, 1, size_buf, fin);
+
+ if (size_read < size_buf && feof(fin) == 0) {
+ sprintf(g->Message, "error in reading %s", fn);
+ rc = RC_FX;
+ } // endif size_read
+
+ if (size_read > 0) {
+ rc = zutp->writeEntry(g, buf, size_read);
+
+ if (rc == RC_FX)
+ sprintf(g->Message, "error in writing %s in the zipfile", fn);
+
+ } // endif size_read
+
+ } while (rc == RC_OK && size_read > 0);
+
+ fclose(fin);
+ zutp->closeEntry();
+ return rc != RC_OK;
+} // end of ZipFile
+
+/***********************************************************************/
+/* Find and Compress several files in zip when creating a table. */
+/***********************************************************************/
+static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, char *pat, char *buf)
+{
+ char filename[_MAX_PATH];
+ int rc;
+
+ /*********************************************************************/
+ /* pat is a multiple file name with wildcard characters */
+ /*********************************************************************/
+ strcpy(filename, pat);
+
+#if defined(__WIN__)
+ char drive[_MAX_DRIVE], direc[_MAX_DIR];
+ WIN32_FIND_DATA FileData;
+ HANDLE hSearch;
+
+ _splitpath(filename, drive, direc, NULL, NULL);
+
+ // Start searching files in the target directory.
+ hSearch = FindFirstFile(filename, &FileData);
+
+ if (hSearch == INVALID_HANDLE_VALUE) {
+ rc = GetLastError();
+
+ if (rc != ERROR_FILE_NOT_FOUND) {
+ FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, GetLastError(), 0, (LPTSTR)&filename, sizeof(filename), NULL);
+ sprintf(g->Message, MSG(BAD_FILE_HANDLE), filename);
+ return true;
+ } else {
+ strcpy(g->Message, "Cannot find any file to load");
+ return true;
+ } // endif rc
+
+ } // endif hSearch
+
+ while (true) {
+ if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ strcat(strcat(strcpy(filename, drive), direc), FileData.cFileName);
+
+ if (ZipFile(g, zutp, filename, FileData.cFileName, buf)) {
+ FindClose(hSearch);
+ return true;
+ } // endif ZipFile
+
+ } // endif dwFileAttributes
+
+ if (!FindNextFile(hSearch, &FileData)) {
+ rc = GetLastError();
+
+ if (rc != ERROR_NO_MORE_FILES) {
+ sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc);
+ FindClose(hSearch);
+ return true;
+ } // endif rc
+
+ break;
+ } // endif FindNextFile
+
+ } // endwhile n
+
+ // Close the search handle.
+ if (!FindClose(hSearch)) {
+ strcpy(g->Message, MSG(SRCH_CLOSE_ERR));
+ return true;
+ } // endif FindClose
+
+#else // !__WIN__
+ struct stat fileinfo;
+ char fn[FN_REFLEN], direc[FN_REFLEN], pattern[FN_HEADLEN], ftype[FN_EXTLEN];
+ DIR *dir;
+ struct dirent *entry;
+
+ _splitpath(filename, NULL, direc, pattern, ftype);
+ strcat(pattern, ftype);
+
+ // Start searching files in the target directory.
+ if (!(dir = opendir(direc))) {
+ sprintf(g->Message, MSG(BAD_DIRECTORY), direc, strerror(errno));
+ return true;
+ } // endif dir
+
+ while ((entry = readdir(dir))) {
+ strcat(strcpy(fn, direc), entry->d_name);
+
+ if (lstat(fn, &fileinfo) < 0) {
+ sprintf(g->Message, "%s: %s", fn, strerror(errno));
+ return true;
+ } else if (!S_ISREG(fileinfo.st_mode))
+ continue; // Not a regular file (should test for links)
+
+ /*******************************************************************/
+ /* Test whether the file name matches the table name filter. */
+ /*******************************************************************/
+ if (fnmatch(pattern, entry->d_name, 0))
+ continue; // Not a match
+
+ strcat(strcpy(filename, direc), entry->d_name);
+
+ if (ZipFile(g, zutp, filename, entry->d_name, buf)) {
+ closedir(dir);
+ return true;
+ } // endif ZipFile
+
+ } // endwhile readdir
+
+ // Close the dir handle.
+ closedir(dir);
+#endif // !__WIN__
+
+ return false;
+} // end of ZipFiles
+
+/***********************************************************************/
+/* Load and Compress a file in zip when creating a table. */
+/***********************************************************************/
+bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul)
+{
+ char *buf;
+ bool err;
+ ZIPUTIL *zutp = new(g) ZIPUTIL(NULL);
+
+ if (zutp->open(g, zfn, append))
+ return true;
+
+ buf = (char*)PlugSubAlloc(g, NULL, WRITEBUFFERSIZE);
+
+ if (mul)
+ err = ZipFiles(g, zutp, fn, buf);
+ else
+ err = ZipFile(g, zutp, fn, entry, buf);
+
+ zutp->close();
+ return err;
+} // end of ZipLoadFile
+
/* -------------------------- class ZIPUTIL -------------------------- */
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-ZIPUTIL::ZIPUTIL(PSZ tgt, bool mul)
+ZIPUTIL::ZIPUTIL(PSZ tgt)
+{
+ zipfile = NULL;
+ target = tgt;
+ fp = NULL;
+ entryopen = false;
+} // end of ZIPUTIL standard constructor
+
+#if 0
+ZIPUTIL::ZIPUTIL(ZIPUTIL *zutp)
+{
+ zipfile = zutp->zipfile;
+ target = zutp->target;
+ fp = zutp->fp;
+ entryopen = zutp->entryopen;
+} // end of UNZIPUTL copy constructor
+#endif // 0
+
+/***********************************************************************/
+/* Fill the zip time structure */
+/* param: tmZip time structure to be filled */
+/***********************************************************************/
+void ZIPUTIL::getTime(tm_zip& tmZip)
+{
+ time_t rawtime;
+ time(&rawtime);
+ struct tm *timeinfo = localtime(&rawtime);
+ tmZip.tm_sec = timeinfo->tm_sec;
+ tmZip.tm_min = timeinfo->tm_min;
+ tmZip.tm_hour = timeinfo->tm_hour;
+ tmZip.tm_mday = timeinfo->tm_mday;
+ tmZip.tm_mon = timeinfo->tm_mon;
+ tmZip.tm_year = timeinfo->tm_year;
+} // end of getTime
+
+/***********************************************************************/
+/* open a zip file for deflate. */
+/* param: filename path and the filename of the zip file to open. */
+/* append: set true to append the zip file */
+/* return: true if open, false otherwise. */
+/***********************************************************************/
+bool ZIPUTIL::open(PGLOBAL g, char *filename, bool append)
+{
+ if (!zipfile && !(zipfile = zipOpen64(filename,
+ append ? APPEND_STATUS_ADDINZIP
+ : APPEND_STATUS_CREATE)))
+ sprintf(g->Message, "Zipfile open error on %s", filename);
+
+ return (zipfile == NULL);
+} // end of open
+
+/***********************************************************************/
+/* Close the zip file. */
+/***********************************************************************/
+void ZIPUTIL::close()
+{
+ if (zipfile) {
+ closeEntry();
+ zipClose(zipfile, 0);
+ zipfile = NULL;
+ } // endif zipfile
+
+} // end of close
+
+/***********************************************************************/
+/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
+/***********************************************************************/
+bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn, bool append)
+{
+ /*********************************************************************/
+ /* The file will be compressed. */
+ /*********************************************************************/
+ if (mode == MODE_INSERT) {
+ bool b = open(g, fn, append);
+
+ if (!b) {
+ if (addEntry(g, target))
+ return true;
+
+ /*****************************************************************/
+ /* Link a Fblock. This make possible to automatically close it */
+ /* in case of error g->jump. */
+ /*****************************************************************/
+ PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr;
+
+ fp = (PFBLOCK)PlugSubAlloc(g, NULL, sizeof(FBLOCK));
+ fp->Type = TYPE_FB_ZIP;
+ fp->Fname = PlugDup(g, fn);
+ fp->Next = dbuserp->Openlist;
+ dbuserp->Openlist = fp;
+ fp->Count = 1;
+ fp->Length = 0;
+ fp->Memory = NULL;
+ fp->Mode = mode;
+ fp->File = this;
+ fp->Handle = 0;
+ } else
+ return true;
+
+ } else {
+ strcpy(g->Message, "Only INSERT mode supported for ZIPPING files");
+ return true;
+ } // endif mode
+
+ return false;
+} // end of OpenTableFile
+
+/***********************************************************************/
+/* Add target in zip file. */
+/***********************************************************************/
+bool ZIPUTIL::addEntry(PGLOBAL g, char *entry)
+{
+ //?? we dont need the stinking time
+ zip_fileinfo zi = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+ getTime(zi.tmz_date);
+ target = entry;
+
+ int err = zipOpenNewFileInZip(zipfile, target, &zi,
+ NULL, 0, NULL, 0, NULL, Z_DEFLATED, Z_DEFAULT_COMPRESSION);
+
+ return !(entryopen = (err == ZIP_OK));
+} // end of addEntry
+
+/***********************************************************************/
+/* writeEntry: Deflate the buffer to the zip file. */
+/***********************************************************************/
+int ZIPUTIL::writeEntry(PGLOBAL g, char *buf, int len)
+{
+ if (zipWriteInFileInZip(zipfile, buf, len) < 0) {
+ sprintf(g->Message, "Error writing %s in the zipfile", target);
+ return RC_FX;
+ } // endif zipWriteInFileInZip
+
+ return RC_OK;
+} // end of writeEntry
+
+/***********************************************************************/
+/* Close the zip file. */
+/***********************************************************************/
+void ZIPUTIL::closeEntry()
+{
+ if (entryopen) {
+ zipCloseFileInZip(zipfile);
+ entryopen = false;
+ } // endif entryopen
+
+} // end of closeEntry
+
+/* ------------------------- class UNZIPUTL -------------------------- */
+
+/***********************************************************************/
+/* Constructors. */
+/***********************************************************************/
+UNZIPUTL::UNZIPUTL(PSZ tgt, bool mul)
{
zipfile = NULL;
target = tgt;
@@ -62,10 +399,10 @@ ZIPUTIL::ZIPUTIL(PSZ tgt, bool mul)
#else
for (int i = 0; i < 256; ++i) mapCaseTable[i] = i;
#endif
-} // end of ZIPUTIL standard constructor
+} // end of UNZIPUTL standard constructor
#if 0
-ZIPUTIL::ZIPUTIL(PZIPUTIL zutp)
+UNZIPUTL::UNZIPUTL(PZIPUTIL zutp)
{
zipfile = zutp->zipfile;
target = zutp->target;
@@ -74,14 +411,14 @@ ZIPUTIL::ZIPUTIL(PZIPUTIL zutp)
entryopen = zutp->entryopen;
multiple = zutp->multiple;
for (int i = 0; i < 256; ++i) mapCaseTable[i] = zutp->mapCaseTable[i];
-} // end of ZIPUTIL copy constructor
+} // end of UNZIPUTL copy constructor
#endif // 0
/***********************************************************************/
/* This code is the copyright property of Alessandro Felice Cantatore. */
/* http://xoomer.virgilio.it/acantato/dev/wildcard/wildmatch.html */
/***********************************************************************/
-bool ZIPUTIL::WildMatch(PSZ pat, PSZ str) {
+bool UNZIPUTL::WildMatch(PSZ pat, PSZ str) {
PSZ s, p;
bool star = FALSE;
@@ -116,7 +453,7 @@ starCheck:
/* param: filename path and the filename of the zip file to open. */
/* return: true if open, false otherwise. */
/***********************************************************************/
-bool ZIPUTIL::open(PGLOBAL g, char *filename)
+bool UNZIPUTL::open(PGLOBAL g, char *filename)
{
if (!zipfile && !(zipfile = unzOpen64(filename)))
sprintf(g->Message, "Zipfile open error on %s", filename);
@@ -127,7 +464,7 @@ bool ZIPUTIL::open(PGLOBAL g, char *filename)
/***********************************************************************/
/* Close the zip file. */
/***********************************************************************/
-void ZIPUTIL::close()
+void UNZIPUTL::close()
{
if (zipfile) {
closeEntry();
@@ -140,7 +477,7 @@ void ZIPUTIL::close()
/***********************************************************************/
/* Find next entry matching target pattern. */
/***********************************************************************/
-int ZIPUTIL::findEntry(PGLOBAL g, bool next)
+int UNZIPUTL::findEntry(PGLOBAL g, bool next)
{
int rc;
@@ -183,7 +520,7 @@ int ZIPUTIL::findEntry(PGLOBAL g, bool next)
/***********************************************************************/
/* Get the next used entry. */
/***********************************************************************/
-int ZIPUTIL::nextEntry(PGLOBAL g)
+int UNZIPUTL::nextEntry(PGLOBAL g)
{
if (multiple) {
int rc;
@@ -206,7 +543,7 @@ int ZIPUTIL::nextEntry(PGLOBAL g)
/***********************************************************************/
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
/***********************************************************************/
-bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn)
+bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn)
{
/*********************************************************************/
/* The file will be decompressed into virtual memory. */
@@ -268,7 +605,7 @@ bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn)
return true;
} else {
- strcpy(g->Message, "Only READ mode supported for ZIP files");
+ strcpy(g->Message, "Only READ mode supported for ZIPPED tables");
return true;
} // endif mode
@@ -278,7 +615,7 @@ bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn)
/***********************************************************************/
/* Open target in zip file. */
/***********************************************************************/
-bool ZIPUTIL::openEntry(PGLOBAL g)
+bool UNZIPUTL::openEntry(PGLOBAL g)
{
int rc;
@@ -316,7 +653,7 @@ bool ZIPUTIL::openEntry(PGLOBAL g)
/***********************************************************************/
/* Close the zip file. */
/***********************************************************************/
-void ZIPUTIL::closeEntry()
+void UNZIPUTL::closeEntry()
{
if (entryopen) {
unzCloseCurrentFile(zipfile);
@@ -330,36 +667,29 @@ void ZIPUTIL::closeEntry()
} // end of closeEntry
-/* -------------------------- class ZIPFAM --------------------------- */
+/* -------------------------- class UNZFAM --------------------------- */
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-ZIPFAM::ZIPFAM(PDOSDEF tdp) : MAPFAM(tdp)
+UNZFAM::UNZFAM(PDOSDEF tdp) : MAPFAM(tdp)
{
zutp = NULL;
target = tdp->GetEntry();
mul = tdp->GetMul();
-} // end of ZIPFAM standard constructor
-
-ZIPFAM::ZIPFAM(PZIPFAM txfp) : MAPFAM(txfp)
-{
- zutp = txfp->zutp;
- target = txfp->target;
- mul = txfp->mul;
-} // end of ZIPFAM copy constructor
+} // end of UNZFAM standard constructor
-ZIPFAM::ZIPFAM(PDOSDEF tdp, PZPXFAM txfp) : MAPFAM(tdp)
+UNZFAM::UNZFAM(PUNZFAM txfp) : MAPFAM(txfp)
{
zutp = txfp->zutp;
target = txfp->target;
mul = txfp->mul;
-} // end of ZIPFAM constructor used in ResetTableOpt
+} // end of UNZFAM copy constructor
/***********************************************************************/
/* ZIP GetFileLength: returns file size in number of bytes. */
/***********************************************************************/
-int ZIPFAM::GetFileLength(PGLOBAL g)
+int UNZFAM::GetFileLength(PGLOBAL g)
{
int len = (zutp && zutp->entryopen) ? Top - Memory
: TXTFAM::GetFileLength(g) * 3;
@@ -373,7 +703,7 @@ int ZIPFAM::GetFileLength(PGLOBAL g)
/***********************************************************************/
/* ZIP Cardinality: return the number of rows if possible. */
/***********************************************************************/
-int ZIPFAM::Cardinality(PGLOBAL g)
+int UNZFAM::Cardinality(PGLOBAL g)
{
if (!g)
return 1;
@@ -388,7 +718,7 @@ int ZIPFAM::Cardinality(PGLOBAL g)
/***********************************************************************/
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
/***********************************************************************/
-bool ZIPFAM::OpenTableFile(PGLOBAL g)
+bool UNZFAM::OpenTableFile(PGLOBAL g)
{
char filename[_MAX_PATH];
MODE mode = Tdbp->GetMode();
@@ -396,7 +726,7 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g)
/*********************************************************************/
/* Allocate the ZIP utility class. */
/*********************************************************************/
- zutp = new(g) ZIPUTIL(target, mul);
+ zutp = new(g) UNZIPUTL(target, mul);
// We used the file name relative to recorded datapath
PlugSetPath(filename, To_File, Tdbp->GetPath());
@@ -415,7 +745,7 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g)
/***********************************************************************/
/* GetNext: go to next entry. */
/***********************************************************************/
-int ZIPFAM::GetNext(PGLOBAL g)
+int UNZFAM::GetNext(PGLOBAL g)
{
int rc = zutp->nextEntry(g);
@@ -431,7 +761,7 @@ int ZIPFAM::GetNext(PGLOBAL g)
/***********************************************************************/
/* ReadBuffer: Read one line for a ZIP file. */
/***********************************************************************/
-int ZIPFAM::ReadBuffer(PGLOBAL g)
+int UNZFAM::ReadBuffer(PGLOBAL g)
{
int rc, len;
@@ -497,37 +827,37 @@ int ZIPFAM::ReadBuffer(PGLOBAL g)
/***********************************************************************/
/* Table file close routine for MAP access method. */
/***********************************************************************/
-void ZIPFAM::CloseTableFile(PGLOBAL g, bool)
+void UNZFAM::CloseTableFile(PGLOBAL g, bool)
{
close();
} // end of CloseTableFile
#endif // 0
-/* -------------------------- class ZPXFAM --------------------------- */
+/* -------------------------- class UZXFAM --------------------------- */
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-ZPXFAM::ZPXFAM(PDOSDEF tdp) : MPXFAM(tdp)
+UZXFAM::UZXFAM(PDOSDEF tdp) : MPXFAM(tdp)
{
zutp = NULL;
target = tdp->GetEntry();
mul = tdp->GetMul();
//Lrecl = tdp->GetLrecl();
-} // end of ZPXFAM standard constructor
+} // end of UZXFAM standard constructor
-ZPXFAM::ZPXFAM(PZPXFAM txfp) : MPXFAM(txfp)
+UZXFAM::UZXFAM(PUZXFAM txfp) : MPXFAM(txfp)
{
zutp = txfp->zutp;
target = txfp->target;
mul = txfp->mul;
//Lrecl = txfp->Lrecl;
-} // end of ZPXFAM copy constructor
+} // end of UZXFAM copy constructor
/***********************************************************************/
/* ZIP GetFileLength: returns file size in number of bytes. */
/***********************************************************************/
-int ZPXFAM::GetFileLength(PGLOBAL g)
+int UZXFAM::GetFileLength(PGLOBAL g)
{
int len;
@@ -545,7 +875,7 @@ int ZPXFAM::GetFileLength(PGLOBAL g)
/***********************************************************************/
/* ZIP Cardinality: return the number of rows if possible. */
/***********************************************************************/
-int ZPXFAM::Cardinality(PGLOBAL g)
+int UZXFAM::Cardinality(PGLOBAL g)
{
if (!g)
return 1;
@@ -566,7 +896,7 @@ int ZPXFAM::Cardinality(PGLOBAL g)
/***********************************************************************/
/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
/***********************************************************************/
-bool ZPXFAM::OpenTableFile(PGLOBAL g)
+bool UZXFAM::OpenTableFile(PGLOBAL g)
{
// May have been already opened in GetFileLength
if (!zutp || !zutp->zipfile) {
@@ -577,7 +907,7 @@ bool ZPXFAM::OpenTableFile(PGLOBAL g)
/* Allocate the ZIP utility class. */
/*********************************************************************/
if (!zutp)
- zutp = new(g)ZIPUTIL(target, mul);
+ zutp = new(g)UNZIPUTL(target, mul);
// We used the file name relative to recorded datapath
PlugSetPath(filename, To_File, Tdbp->GetPath());
@@ -600,7 +930,7 @@ bool ZPXFAM::OpenTableFile(PGLOBAL g)
/***********************************************************************/
/* GetNext: go to next entry. */
/***********************************************************************/
-int ZPXFAM::GetNext(PGLOBAL g)
+int UZXFAM::GetNext(PGLOBAL g)
{
int rc = zutp->nextEntry(g);
@@ -620,3 +950,146 @@ int ZPXFAM::GetNext(PGLOBAL g)
return RC_OK;
} // end of GetNext
+/* -------------------------- class ZIPFAM --------------------------- */
+
+/***********************************************************************/
+/* Constructor. */
+/***********************************************************************/
+ZIPFAM::ZIPFAM(PDOSDEF tdp) : DOSFAM(tdp)
+{
+ zutp = NULL;
+ target = tdp->GetEntry();
+ append = tdp->GetAppend();
+} // end of ZIPFAM standard constructor
+
+/***********************************************************************/
+/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
+/***********************************************************************/
+bool ZIPFAM::OpenTableFile(PGLOBAL g)
+{
+ char filename[_MAX_PATH];
+ MODE mode = Tdbp->GetMode();
+
+ /*********************************************************************/
+ /* Allocate the ZIP utility class. */
+ /*********************************************************************/
+ zutp = new(g) ZIPUTIL(target);
+
+ // We used the file name relative to recorded datapath
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+
+ if (!zutp->OpenTable(g, mode, filename, append)) {
+ To_Fb = zutp->fp; // Useful when closing
+ } else
+ return true;
+
+ return AllocateBuffer(g);
+} // end of OpenTableFile
+
+/***********************************************************************/
+/* ReadBuffer: Read one line for a ZIP file. */
+/***********************************************************************/
+int ZIPFAM::ReadBuffer(PGLOBAL g)
+{
+  strcpy(g->Message, "ReadBuffer should not be called when zipping");
+ return RC_FX;
+} // end of ReadBuffer
+
+/***********************************************************************/
+/* WriteBuffer: Deflate the buffer to the zip file. */
+/***********************************************************************/
+int ZIPFAM::WriteBuffer(PGLOBAL g)
+{
+ int len;
+
+ // Prepare to write the new line
+ strcat(strcpy(To_Buf, Tdbp->GetLine()), (Bin) ? CrLf : "\n");
+ len = strchr(To_Buf, '\n') - To_Buf + 1;
+ return zutp->writeEntry(g, To_Buf, len);
+} // end of WriteBuffer
+
+/***********************************************************************/
+/* Table file close routine for ZIP access method. */
+/***********************************************************************/
+void ZIPFAM::CloseTableFile(PGLOBAL g, bool)
+{
+ To_Fb->Count = 0;
+ zutp->close();
+} // end of CloseTableFile
+
+/* -------------------------- class ZPXFAM --------------------------- */
+
+/***********************************************************************/
+/* Constructor. */
+/***********************************************************************/
+ZPXFAM::ZPXFAM(PDOSDEF tdp) : FIXFAM(tdp)
+{
+ zutp = NULL;
+ target = tdp->GetEntry();
+ append = tdp->GetAppend();
+ //Lrecl = tdp->GetLrecl();
+} // end of ZPXFAM standard constructor
+
+/***********************************************************************/
+/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
+/***********************************************************************/
+bool ZPXFAM::OpenTableFile(PGLOBAL g)
+{
+ char filename[_MAX_PATH];
+ MODE mode = Tdbp->GetMode();
+
+ /*********************************************************************/
+ /* Allocate the ZIP utility class. */
+ /*********************************************************************/
+ zutp = new(g) ZIPUTIL(target);
+
+ // We used the file name relative to recorded datapath
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+
+ if (!zutp->OpenTable(g, mode, filename, append)) {
+ To_Fb = zutp->fp; // Useful when closing
+ } else
+ return true;
+
+ return AllocateBuffer(g);
+} // end of OpenTableFile
+
+/***********************************************************************/
+/* WriteBuffer: Deflate the buffer to the zip file. */
+/***********************************************************************/
+int ZPXFAM::WriteBuffer(PGLOBAL g)
+{
+ /*********************************************************************/
+ /* In Insert mode, we write only full blocks. */
+ /*********************************************************************/
+ if (++CurNum != Rbuf) {
+ Tdbp->IncLine(Lrecl); // Used by DOSCOL functions
+ return RC_OK;
+ } // endif CurNum
+
+  // Now start the compression process.
+ if (zutp->writeEntry(g, To_Buf, Lrecl * Rbuf) != RC_OK) {
+ Closing = true;
+ return RC_FX;
+ } // endif writeEntry
+
+ CurBlk++;
+ CurNum = 0;
+ Tdbp->SetLine(To_Buf);
+ return RC_OK;
+} // end of WriteBuffer
+
+/***********************************************************************/
+/* Table file close routine for ZIP access method. */
+/***********************************************************************/
+void ZPXFAM::CloseTableFile(PGLOBAL g, bool)
+{
+ if (CurNum && !Closing) {
+ // Some more inserted lines remain to be written
+ Rbuf = CurNum--;
+ WriteBuffer(g);
+ } // endif Curnum
+
+ To_Fb->Count = 0;
+ zutp->close();
+} // end of CloseTableFile
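
The ZIPFAM and ZPXFAM classes above add write support for zipped tables. A minimal usage sketch, assuming the Entry and Append settings are passed through OPTION_LIST as the handler reads them (table, file and entry names are made up):

CREATE TABLE tzw (
  name CHAR(12) NOT NULL,
  num  INT(3) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=FIX FILE_NAME='newfix.zip' ZIPPED=1
  OPTION_LIST='Entry=records.fix,Append=NO';
INSERT INTO tzw VALUES ('first',1),('second',2);

Each insert goes through WriteBuffer(), which deflates the lines into the named zip entry; reading the data back still uses the UNZIPUTL path, which remains read-only.
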
diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h
index 9312fb2f70e..3160703bd20 100644
--- a/storage/connect/filamzip.h
+++ b/storage/connect/filamzip.h
@@ -1,7 +1,7 @@
/************** filamzip H Declares Source Code File (.H) **************/
-/* Name: filamzip.h Version 1.0 */
+/* Name: filamzip.h Version 1.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
/* This file contains the ZIP file access method classes declares. */
/***********************************************************************/
@@ -10,10 +10,14 @@
#include "block.h"
#include "filamap.h"
+#include "filamfix.h"
+#include "zip.h"
#include "unzip.h"
#define DLLEXPORT extern "C"
+typedef class UNZFAM *PUNZFAM;
+typedef class UZXFAM *PUZXFAM;
typedef class ZIPFAM *PZIPFAM;
typedef class ZPXFAM *PZPXFAM;
@@ -21,16 +25,50 @@ typedef class ZPXFAM *PZPXFAM;
/* This is the ZIP utility fonctions class. */
/***********************************************************************/
class DllExport ZIPUTIL : public BLOCK {
-public:
+ public:
// Constructor
- ZIPUTIL(PSZ tgt, bool mul);
-//ZIPUTIL(ZIPUTIL *zutp);
+ ZIPUTIL(PSZ tgt);
+ //ZIPUTIL(ZIPUTIL *zutp);
// Implementation
-//PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)ZIPFAM(this); }
+ //PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); }
// Methods
- virtual bool OpenTable(PGLOBAL g, MODE mode, char *fn);
+ bool OpenTable(PGLOBAL g, MODE mode, char *fn, bool append);
+ bool open(PGLOBAL g, char *fn, bool append);
+ bool addEntry(PGLOBAL g, char *entry);
+ void close(void);
+ void closeEntry(void);
+ int writeEntry(PGLOBAL g, char *buf, int len);
+ void getTime(tm_zip& tmZip);
+
+ // Members
+ zipFile zipfile; // The ZIP container file
+ PSZ target; // The target file name
+//unz_file_info finfo; // The current file info
+ PFBLOCK fp;
+//char *memory;
+//uint size;
+//int multiple; // Multiple targets
+ bool entryopen; // True when open current entry
+//char fn[FILENAME_MAX]; // The current entry file name
+//char mapCaseTable[256];
+}; // end of ZIPUTIL
+
+/***********************************************************************/
+/*  This is the unZIP utility functions class.                        */
+/***********************************************************************/
+class DllExport UNZIPUTL : public BLOCK {
+ public:
+ // Constructor
+ UNZIPUTL(PSZ tgt, bool mul);
+//UNZIPUTL(UNZIPUTL *zutp);
+
+ // Implementation
+//PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); }
+
+ // Methods
+ bool OpenTable(PGLOBAL g, MODE mode, char *fn);
bool open(PGLOBAL g, char *fn);
bool openEntry(PGLOBAL g);
void close(void);
@@ -50,68 +88,120 @@ public:
bool entryopen; // True when open current entry
char fn[FILENAME_MAX]; // The current entry file name
char mapCaseTable[256];
-}; // end of ZIPFAM
+}; // end of UNZIPUTL
/***********************************************************************/
-/* This is the ZIP file access method. */
+/* This is the unzip file access method. */
/***********************************************************************/
-class DllExport ZIPFAM : public MAPFAM {
- friend class ZPXFAM;
-public:
+class DllExport UNZFAM : public MAPFAM {
+//friend class UZXFAM;
+ public:
// Constructors
- ZIPFAM(PDOSDEF tdp);
- ZIPFAM(PZIPFAM txfp);
- ZIPFAM(PDOSDEF tdp, PZPXFAM txfp);
+ UNZFAM(PDOSDEF tdp);
+ UNZFAM(PUNZFAM txfp);
// Implementation
- virtual AMT GetAmType(void) { return TYPE_AM_ZIP; }
- virtual PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)ZIPFAM(this); }
+ virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+ virtual PTXF Duplicate(PGLOBAL g) {return (PTXF) new(g) UNZFAM(this);}
// Methods
virtual int Cardinality(PGLOBAL g);
virtual int GetFileLength(PGLOBAL g);
-//virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
+ //virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
virtual bool OpenTableFile(PGLOBAL g);
virtual bool DeferReading(void) { return false; }
virtual int GetNext(PGLOBAL g);
-//virtual int ReadBuffer(PGLOBAL g);
-//virtual int WriteBuffer(PGLOBAL g);
-//virtual int DeleteRecords(PGLOBAL g, int irc);
-//virtual void CloseTableFile(PGLOBAL g, bool abort);
+ //virtual int ReadBuffer(PGLOBAL g);
+ //virtual int WriteBuffer(PGLOBAL g);
+ //virtual int DeleteRecords(PGLOBAL g, int irc);
+ //virtual void CloseTableFile(PGLOBAL g, bool abort);
-protected:
+ protected:
// Members
- ZIPUTIL *zutp;
- PSZ target;
- bool mul;
-}; // end of ZIPFAM
+ UNZIPUTL *zutp;
+ PSZ target;
+ bool mul;
+}; // end of UNZFAM
/***********************************************************************/
-/* This is the fixed ZIP file access method. */
+/* This is the fixed unzip file access method. */
/***********************************************************************/
-class DllExport ZPXFAM : public MPXFAM {
- friend class ZIPFAM;
-public:
+class DllExport UZXFAM : public MPXFAM {
+//friend class UNZFAM;
+ public:
// Constructors
- ZPXFAM(PDOSDEF tdp);
- ZPXFAM(PZPXFAM txfp);
+ UZXFAM(PDOSDEF tdp);
+ UZXFAM(PUZXFAM txfp);
// Implementation
virtual AMT GetAmType(void) { return TYPE_AM_ZIP; }
- virtual PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)ZPXFAM(this); }
+ virtual PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UZXFAM(this); }
// Methods
virtual int GetFileLength(PGLOBAL g);
virtual int Cardinality(PGLOBAL g);
virtual bool OpenTableFile(PGLOBAL g);
virtual int GetNext(PGLOBAL g);
-//virtual int ReadBuffer(PGLOBAL g);
+ //virtual int ReadBuffer(PGLOBAL g);
+
+ protected:
+ // Members
+ UNZIPUTL *zutp;
+ PSZ target;
+ bool mul;
+}; // end of UZXFAM
+
+/***********************************************************************/
+/* This is the zip file access method. */
+/***********************************************************************/
+class DllExport ZIPFAM : public DOSFAM {
+ public:
+ // Constructors
+ ZIPFAM(PDOSDEF tdp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+
+ // Methods
+ virtual int Cardinality(PGLOBAL g) {return 0;}
+ virtual int GetFileLength(PGLOBAL g) {return g ? 0 : 1;}
+ //virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
+ virtual bool OpenTableFile(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ //virtual int DeleteRecords(PGLOBAL g, int irc);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
+
+ protected:
+ // Members
+ ZIPUTIL *zutp;
+ PSZ target;
+ bool append;
+}; // end of ZIPFAM
+
+/***********************************************************************/
+/* This is the fixed zip file access method. */
+/***********************************************************************/
+class DllExport ZPXFAM : public FIXFAM {
+ public:
+ // Constructors
+ ZPXFAM(PDOSDEF tdp);
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+
+ // Methods
+ virtual int Cardinality(PGLOBAL g) {return 0;}
+ virtual int GetFileLength(PGLOBAL g) {return g ? 0 : 1;}
+ virtual bool OpenTableFile(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+ virtual void CloseTableFile(PGLOBAL g, bool abort);
-protected:
+ protected:
// Members
ZIPUTIL *zutp;
PSZ target;
- bool mul;
+ bool append;
}; // end of ZPXFAM
#endif // __FILAMZIP_H
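
On the read side, UNZIPUTL::WildMatch() lets the entry name be a pattern, and findEntry()/nextEntry() then walk every matching entry of the archive. A hedged sketch (option names are taken from the handler code; file and entry names are made up):

CREATE TABLE tzr (
  name CHAR(12) NOT NULL,
  num  INT(3) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='parts.zip' ZIPPED=1
  OPTION_LIST='Entry=part*.txt,Mulentries=YES';
SELECT * FROM tzr;
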
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index e66f03e8174..d1ab18f52d5 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) Olivier Bertrand 2004 - 2016
+/* Copyright (C) Olivier Bertrand 2004 - 2017
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -125,6 +125,8 @@
#endif // UNIX
#include "global.h"
#include "plgdbsem.h"
+#include "xtable.h"
+#include "tabext.h"
#if defined(ODBC_SUPPORT)
#include "odbccat.h"
#endif // ODBC_SUPPORT
@@ -132,12 +134,11 @@
#include "tabjdbc.h"
#include "jdbconn.h"
#endif // JDBC_SUPPORT
-#include "xtable.h"
#include "tabmysql.h"
#include "filamdbf.h"
#include "tabxcl.h"
#include "tabfmt.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "tabcol.h"
#include "xindex.h"
#if defined(__WIN__)
@@ -171,9 +172,9 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
- char version[]= "Version 1.05.0001 December 13, 2016";
+ char version[]= "Version 1.05.0003 February 27, 2017";
#if defined(__WIN__)
- char compver[]= "Version 1.05.0001 " __DATE__ " " __TIME__;
+ char compver[]= "Version 1.05.0003 " __DATE__ " " __TIME__;
char slash= '\\';
#else // !__WIN__
char slash= '/';
@@ -214,6 +215,7 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v);
void PushWarning(PGLOBAL g, THD *thd, int level);
bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host,
const char *db, char *tab, const char *src, int port);
+bool ZipLoadFile(PGLOBAL, char*, char*, char*, bool, bool);
bool ExactInfo(void);
USETEMP UseTemp(void);
int GetConvSize(void);
@@ -556,7 +558,7 @@ ha_create_table_option connect_index_option_list[]=
/***********************************************************************/
/* Push G->Message as a MySQL warning. */
/***********************************************************************/
-bool PushWarning(PGLOBAL g, PTDBASE tdbp, int level)
+bool PushWarning(PGLOBAL g, PTDB tdbp, int level)
{
PHC phc;
THD *thd;
@@ -1024,7 +1026,7 @@ char *GetListOption(PGLOBAL g, const char *opname,
char key[16], val[256];
char *pk, *pv, *pn;
- char *opval= (char*) def;
+ char *opval= (char*)def;
int n;
for (pk= (char*)oplist; pk; pk= ++pn) {
@@ -1032,26 +1034,17 @@ char *GetListOption(PGLOBAL g, const char *opname,
pv= strchr(pk, '=');
if (pv && (!pn || pv < pn)) {
- n= pv - pk;
+ n= MY_MIN(pv - pk, sizeof(key) - 1);
memcpy(key, pk, n);
key[n]= 0;
pv++;
-
- if (pn) {
- n= pn - pv;
- memcpy(val, pv, n);
- val[n]= 0;
- } else
- strcpy(val, pv);
-
+ n= MY_MIN((pn ? pn - pv : strlen(pv)), sizeof(val) - 1);
+ memcpy(val, pv, n);
+ val[n]= 0;
} else {
- if (pn) {
- n= MY_MIN(pn - pk, 15);
- memcpy(key, pk, n);
- key[n]= 0;
- } else
- strcpy(key, pk);
-
+ n= MY_MIN((pn ? pn - pk : strlen(pk)), sizeof(key) - 1);
+ memcpy(key, pk, n);
+ key[n]= 0;
val[0]= 0;
} // endif pv
@@ -1105,7 +1098,7 @@ char *GetStringTableOption(PGLOBAL g, PTOS options, char *opname, char *sdef)
else if (!stricmp(opname, "Data_charset"))
opval= options->data_charset;
- if (!opval && options && options->oplist)
+ if (!opval && options->oplist)
opval= GetListOption(g, opname, options->oplist);
return opval ? (char*)opval : sdef;
@@ -2113,7 +2106,7 @@ int ha_connect::ScanRecord(PGLOBAL g, uchar *)
PCOL colp;
PVAL value, sdvalin;
Field *fp;
- PTDBASE tp= (PTDBASE)tdbp;
+//PTDBASE tp= (PTDBASE)tdbp;
String attribute(attr_buffer, sizeof(attr_buffer),
table->s->table_charset);
my_bitmap_map *bmap= dbug_tmp_use_all_columns(table, table->read_set);
@@ -2132,7 +2125,7 @@ int ha_connect::ScanRecord(PGLOBAL g, uchar *)
&& tdbp->GetAmType() != TYPE_AM_ODBC
&& tdbp->GetAmType() != TYPE_AM_JDBC) ||
bitmap_is_set(table->write_set, fp->field_index)) {
- for (colp= tp->GetSetCols(); colp; colp= colp->GetNext())
+ for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext())
if (!stricmp(colp->GetName(), fp->field_name))
break;
@@ -2219,7 +2212,7 @@ int ha_connect::ScanRecord(PGLOBAL g, uchar *)
} else if (xmod == MODE_UPDATE) {
PCOL cp;
- for (cp= tp->GetColumns(); cp; cp= cp->GetNext())
+ for (cp= tdbp->GetColumns(); cp; cp= cp->GetNext())
if (!stricmp(colp->GetName(), cp->GetName()))
break;
@@ -2686,7 +2679,8 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
{
AMT tty = filp->Type;
char *body= filp->Body;
- unsigned int i;
+ char *havg= filp->Having;
+ unsigned int i;
bool ismul= false, x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC);
bool nonul= ((tty == TYPE_AM_ODBC || tty == TYPE_AM_JDBC) &&
(tdbp->GetMode() == MODE_INSERT || tdbp->GetMode() == MODE_DELETE));
@@ -2699,7 +2693,8 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
htrc("Cond type=%d\n", cond->type());
if (cond->type() == COND::COND_ITEM) {
- char *p1, *p2;
+ char *pb0, *pb1, *pb2, *ph0, *ph1, *ph2;
+ bool bb = false, bh = false;
Item_cond *cond_item= (Item_cond *)cond;
if (x)
@@ -2719,38 +2714,78 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
List_iterator<Item> li(*arglist);
const Item *subitem;
- p1= body + strlen(body);
- strcpy(p1, "(");
- p2= p1 + 1;
+ pb0= pb1= body + strlen(body);
+ strcpy(pb0, "(");
+ pb2= pb1 + 1;
+
+ if (havg) {
+ ph0= ph1= havg + strlen(havg);
+ strcpy(ph0, "(");
+ ph2= ph1 + 1;
+ } // endif havg
for (i= 0; i < arglist->elements; i++)
if ((subitem= li++)) {
if (!CheckCond(g, filp, subitem)) {
if (vop == OP_OR || nonul)
return NULL;
- else
- *p2= 0;
+ else {
+ *pb2= 0;
+ if (havg) *ph2= 0;
+ } // endelse
} else {
- p1= p2 + strlen(p2);
- strcpy(p1, GetValStr(vop, false));
- p2= p1 + strlen(p1);
+ if (filp->Bd) {
+ pb1= pb2 + strlen(pb2);
+ strcpy(pb1, GetValStr(vop, false));
+ pb2= pb1 + strlen(pb1);
+ } // endif Bd
+
+ if (filp->Hv) {
+ ph1= ph2 + strlen(ph2);
+ strcpy(ph1, GetValStr(vop, false));
+ ph2= ph1 + strlen(ph1);
+ } // endif Hv
+
} // endif CheckCond
+ bb |= filp->Bd;
+ bh |= filp->Hv;
+ filp->Bd = filp->Hv = false;
} else
return NULL;
- if (*p1 != '(')
- strcpy(p1, ")");
- else
- return NULL;
+ if (bb) {
+ strcpy(pb1, ")");
+ filp->Bd = bb;
+ } else
+ *pb0= 0;
+
+ if (havg) {
+ if (bb && bh && vop == OP_OR) {
+          // Cannot OR a where clause with a having clause
+ bb= bh= 0;
+ *pb0 = 0;
+ *ph0 = 0;
+ } else if (bh) {
+ strcpy(ph1, ")");
+ filp->Hv= bh;
+ } else
+ *ph0 = 0;
+
+ } // endif havg
+
+ if (!bb && !bh)
+ return NULL;
} else if (cond->type() == COND::FUNC_ITEM) {
unsigned int i;
- bool iscol, neg= FALSE;
+ bool iscol, ishav= false, neg= false;
Item_func *condf= (Item_func *)cond;
Item* *args= condf->arguments();
+ filp->Bd = filp->Hv = false;
+
if (trace)
htrc("Func type=%d argnum=%d\n", condf->functype(),
condf->argument_count());
@@ -2799,8 +2834,9 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
ha_field_option_struct *fop;
Item_field *pField= (Item_field *)args[i];
- if (x && i)
- return NULL;
+ // IN and BETWEEN clauses should be col VOP list
+ if (i && (x || ismul))
+ return NULL; // IN and BETWEEN clauses should be col VOP list
else if (pField->field->table != table)
return NULL; // Field does not belong to this table
else if (tty != TYPE_AM_WMI && IsIndexed(pField->field))
@@ -2816,10 +2852,19 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
else
return NULL;
- } else if (tty == TYPE_AM_TBL)
- return NULL;
- else
- fnm= pField->field->field_name;
+ } else if (tty == TYPE_AM_TBL) {
+ return NULL;
+ } else {
+ bool h;
+
+ fnm = filp->Chk(pField->field->field_name, &h);
+
+ if (h && i && !ishav)
+ return NULL; // Having should be col VOP arg
+ else
+ ishav = h;
+
+ } // endif's
if (trace) {
htrc("Field index=%d\n", pField->field->field_index);
@@ -2828,11 +2873,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
htrc("Field_type=%d\n", args[i]->field_type());
} // endif trace
- // IN and BETWEEN clauses should be col VOP list
- if (i && ismul)
- return NULL;
-
- strcat(body, fnm);
+ strcat((ishav ? havg : body), fnm);
} else if (args[i]->type() == COND::FUNC_ITEM) {
if (tty == TYPE_AM_MYSQL) {
if (!CheckCond(g, filp, args[i]))
@@ -2871,32 +2912,34 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
return NULL;
if (!x) {
+ char *s = (ishav) ? havg : body;
+
// Append the value to the filter
switch (args[i]->field_type()) {
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
if (tty == TYPE_AM_ODBC) {
- strcat(body, "{ts '");
- strncat(body, res->ptr(), res->length());
+ strcat(s, "{ts '");
+ strncat(s, res->ptr(), res->length());
if (res->length() < 19)
- strcat(body, "1970-01-01 00:00:00" + res->length());
+ strcat(s, "1970-01-01 00:00:00" + res->length());
- strcat(body, "'}");
+ strcat(s, "'}");
break;
} // endif ODBC
case MYSQL_TYPE_DATE:
if (tty == TYPE_AM_ODBC) {
- strcat(body, "{d '");
- strcat(strncat(body, res->ptr(), res->length()), "'}");
+ strcat(s, "{d '");
+ strcat(strncat(s, res->ptr(), res->length()), "'}");
break;
} // endif ODBC
case MYSQL_TYPE_TIME:
if (tty == TYPE_AM_ODBC) {
- strcat(body, "{t '");
- strcat(strncat(body, res->ptr(), res->length()), "'}");
+ strcat(s, "{t '");
+ strcat(strncat(s, res->ptr(), res->length()), "'}");
break;
} // endif ODBC
@@ -2905,39 +2948,39 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
switch (args[0]->field_type()) {
case MYSQL_TYPE_TIMESTAMP:
case MYSQL_TYPE_DATETIME:
- strcat(body, "{ts '");
- strncat(body, res->ptr(), res->length());
+ strcat(s, "{ts '");
+ strncat(s, res->ptr(), res->length());
if (res->length() < 19)
- strcat(body, "1970-01-01 00:00:00" + res->length());
+ strcat(s, "1970-01-01 00:00:00" + res->length());
- strcat(body, "'}");
+ strcat(s, "'}");
break;
case MYSQL_TYPE_DATE:
- strcat(body, "{d '");
- strncat(body, res->ptr(), res->length());
- strcat(body, "'}");
+ strcat(s, "{d '");
+ strncat(s, res->ptr(), res->length());
+ strcat(s, "'}");
break;
case MYSQL_TYPE_TIME:
- strcat(body, "{t '");
- strncat(body, res->ptr(), res->length());
- strcat(body, "'}");
+ strcat(s, "{t '");
+ strncat(s, res->ptr(), res->length());
+ strcat(s, "'}");
break;
default:
- strcat(body, "'");
- strncat(body, res->ptr(), res->length());
- strcat(body, "'");
+ strcat(s, "'");
+ strncat(s, res->ptr(), res->length());
+ strcat(s, "'");
} // endswitch field type
} else {
- strcat(body, "'");
- strncat(body, res->ptr(), res->length());
- strcat(body, "'");
+ strcat(s, "'");
+ strncat(s, res->ptr(), res->length());
+ strcat(s, "'");
} // endif tty
break;
default:
- strncat(body, res->ptr(), res->length());
+ strncat(s, res->ptr(), res->length());
} // endswitch field type
} else {
@@ -2953,22 +2996,28 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
} // endif x
- } // endif
+ } // endif's Type
if (!x) {
- if (!i)
- strcat(body, GetValStr(vop, neg));
+ char *s = (ishav) ? havg : body;
+
+ if (!i)
+ strcat(s, GetValStr(vop, neg));
else if (vop == OP_XX && i == 1)
- strcat(body, " AND ");
+ strcat(s, " AND ");
else if (vop == OP_IN)
- strcat(body, (i == condf->argument_count() - 1) ? ")" : ",");
+ strcat(s, (i == condf->argument_count() - 1) ? ")" : ",");
} // endif x
} // endfor i
- if (x)
- filp->Op= vop;
+ if (x)
+ filp->Op = vop;
+ else if (ishav)
+ filp->Hv = true;
+ else
+ filp->Bd = true;
} else {
if (trace)
@@ -3025,16 +3074,28 @@ const COND *ha_connect::cond_push(const COND *cond)
if (b) {
PCFIL filp;
+ int rc;
if ((filp= tdbp->GetCondFil()) && filp->Cond == cond &&
filp->Idx == active_index && filp->Type == tty)
goto fin; // Already done
filp= new(g) CONDFIL(cond, active_index, tty);
- filp->Body= (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0);
- *filp->Body= 0;
+ rc = filp->Init(g, this);
+
+ if (rc == RC_INFO) {
+ filp->Having = (char*)PlugSubAlloc(g, NULL, 256);
+ *filp->Having = 0;
+ } else if (rc == RC_FX)
+ goto fin;
+
+ filp->Body = (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0);
+ *filp->Body = 0;
if (CheckCond(g, filp, cond)) {
+ if (filp->Having && strlen(filp->Having) > 255)
+        goto fin;               // Having area overflow (256-byte limit)
+
if (trace)
htrc("cond_push: %s\n", filp->Body);
@@ -3207,9 +3268,9 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*)
tdbp= GetTDB(g);
dup->Check |= CHK_OPT;
- if (tdbp) {
+ if (tdbp && !tdbp->IsRemote()) {
bool dop= IsTypeIndexable(GetRealType(NULL));
- bool dox= (((PTDBASE)tdbp)->GetDef()->Indexable() == 1);
+ bool dox= (tdbp->GetDef()->Indexable() == 1);
if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) {
if (rc == RC_INFO) {
@@ -3220,7 +3281,7 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*)
} // endif rc
- } else
+ } else if (!tdbp)
rc= HA_ERR_INTERNAL_ERROR;
return rc;
@@ -3463,9 +3524,9 @@ int ha_connect::index_init(uint idx, bool sorted)
htrc("index_init CONNECT: %s\n", g->Message);
active_index= MAX_KEY;
rc= HA_ERR_INTERNAL_ERROR;
- } else if (((PTDBDOX)tdbp)->To_Kindex) {
+ } else if (tdbp->GetKindex()) {
if (((PTDBDOX)tdbp)->To_Kindex->GetNum_K()) {
- if (((PTDBASE)tdbp)->GetFtype() != RECFM_NAF)
+ if (tdbp->GetFtype() != RECFM_NAF)
((PTDBDOX)tdbp)->GetTxfp()->ResetBuffer(g);
active_index= idx;
@@ -3879,11 +3940,10 @@ int ha_connect::rnd_next(uchar *buf)
void ha_connect::position(const uchar *)
{
DBUG_ENTER("ha_connect::position");
-//if (((PTDBASE)tdbp)->GetDef()->Indexable())
- my_store_ptr(ref, ref_length, (my_off_t)((PTDBASE)tdbp)->GetRecpos());
+ my_store_ptr(ref, ref_length, (my_off_t)tdbp->GetRecpos());
if (trace > 1)
- htrc("position: pos=%d\n", ((PTDBASE)tdbp)->GetRecpos());
+ htrc("position: pos=%d\n", tdbp->GetRecpos());
DBUG_VOID_RETURN;
} // end of position
@@ -3908,14 +3968,14 @@ void ha_connect::position(const uchar *)
int ha_connect::rnd_pos(uchar *buf, uchar *pos)
{
int rc;
- PTDBASE tp= (PTDBASE)tdbp;
+//PTDBASE tp= (PTDBASE)tdbp;
DBUG_ENTER("ha_connect::rnd_pos");
- if (!tp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) {
+ if (!tdbp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) {
if (trace)
- htrc("rnd_pos: %d\n", tp->GetRecpos());
+ htrc("rnd_pos: %d\n", tdbp->GetRecpos());
- tp->SetFilter(NULL);
+ tdbp->SetFilter(NULL);
rc= rnd_next(buf);
} else
rc= HA_ERR_KEY_NOT_FOUND;
@@ -4093,7 +4153,7 @@ int ha_connect::delete_all_rows()
if (tdbp && tdbp->GetUse() == USE_OPEN &&
tdbp->GetAmType() != TYPE_AM_XML &&
- ((PTDBASE)tdbp)->GetFtype() != RECFM_NAF)
+ tdbp->GetFtype() != RECFM_NAF)
// Close and reopen the table so it will be deleted
rc= CloseTable(g);
@@ -4471,12 +4531,12 @@ int ha_connect::external_lock(THD *thd, int lock_type)
if (!tdbp) {
if (!(tdbp= GetTDB(g)))
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
- else if (!((PTDBASE)tdbp)->GetDef()->Indexable()) {
+ else if (!tdbp->GetDef()->Indexable()) {
sprintf(g->Message, "external_lock: Table %s is not indexable", tdbp->GetName());
// DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
DBUG_RETURN(0);
- } else if (((PTDBASE)tdbp)->GetDef()->Indexable() == 1) {
+ } else if (tdbp->GetDef()->Indexable() == 1) {
bool oldsep= ((PCHK)g->Xchk)->oldsep;
bool newsep= ((PCHK)g->Xchk)->newsep;
PTDBDOS tdp= (PTDBDOS)tdbp;
@@ -4557,7 +4617,7 @@ int ha_connect::external_lock(THD *thd, int lock_type)
rc= 0;
} // endif MakeIndex
- } else if (((PTDBASE)tdbp)->GetDef()->Indexable() == 3) {
+ } else if (tdbp->GetDef()->Indexable() == 3) {
if (CheckVirtualIndex(NULL)) {
// Make it a warning to avoid crash
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -5225,6 +5285,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
fncn= topt->catfunc;
fnc= GetFuncID(fncn);
sep= topt->separator;
+ mul = (int)topt->multiple;
+ tbl= topt->tablist;
col= topt->colist;
if (topt->oplist) {
@@ -5422,8 +5484,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
if (mydef->GetPassword())
pwd= mydef->GetPassword();
- if (mydef->GetDatabase())
- db= mydef->GetDatabase();
+ if (mydef->GetTabschema())
+ db = mydef->GetTabschema();
if (mydef->GetTabname())
tab= mydef->GetTabname();
@@ -6049,8 +6111,8 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (mydef->GetHostname())
host= mydef->GetHostname();
- if (mydef->GetDatabase())
- db= mydef->GetDatabase();
+ if (mydef->GetTabschema())
+ db = mydef->GetTabschema();
if (mydef->GetTabname())
tab= mydef->GetTabname();
@@ -6263,21 +6325,26 @@ int ha_connect::create(const char *name, TABLE *table_arg,
// Check for incompatible options
if (options->sepindex) {
my_message(ER_UNKNOWN_ERROR,
- "SEPINDEX is incompatible with unspecified file name",
- MYF(0));
+ "SEPINDEX is incompatible with unspecified file name", MYF(0));
DBUG_RETURN(HA_ERR_UNSUPPORTED);
- } else if (GetTypeID(options->type) == TAB_VEC)
- if (!table->s->max_rows || options->split) {
- my_printf_error(ER_UNKNOWN_ERROR,
- "%s tables whose file name is unspecified cannot be split",
- MYF(0), options->type);
- DBUG_RETURN(HA_ERR_UNSUPPORTED);
- } else if (options->header == 2) {
- my_printf_error(ER_UNKNOWN_ERROR,
- "header=2 is not allowed for %s tables whose file name is unspecified",
- MYF(0), options->type);
- DBUG_RETURN(HA_ERR_UNSUPPORTED);
- } // endif's
+ } else if (GetTypeID(options->type) == TAB_VEC) {
+ if (!table->s->max_rows || options->split) {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "%s tables whose file name is unspecified cannot be split",
+ MYF(0), options->type);
+ DBUG_RETURN(HA_ERR_UNSUPPORTED);
+ } else if (options->header == 2) {
+ my_printf_error(ER_UNKNOWN_ERROR,
+ "header=2 is not allowed for %s tables whose file name is unspecified",
+ MYF(0), options->type);
+ DBUG_RETURN(HA_ERR_UNSUPPORTED);
+ } // endif's
+
+ } else if (options->zipped) {
+ my_message(ER_UNKNOWN_ERROR,
+ "ZIPPED is incompatible with unspecified file name", MYF(0));
+ DBUG_RETURN(HA_ERR_UNSUPPORTED);
+ } // endif's options
// Fold type to lower case
for (int i= 0; i < 12; i++)
@@ -6330,6 +6397,36 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (trace)
htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas);
+ if (options->zipped) {
+ // Check whether the zip entry must be made from a file
+ char *fn = GetListOption(g, "Load", options->oplist, NULL);
+
+ if (fn) {
+ char zbuf[_MAX_PATH], buf[_MAX_PATH], dbpath[_MAX_PATH];
+ char *entry = GetListOption(g, "Entry", options->oplist, NULL);
+ char *a = GetListOption(g, "Append", options->oplist, "NO");
+ bool append = *a == '1' || *a == 'Y' || *a == 'y' || !stricmp(a, "ON");
+ char *m = GetListOption(g, "Mulentries", options->oplist, "NO");
+ bool mul = *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON");
+
+ if (!entry && !mul) {
+ my_message(ER_UNKNOWN_ERROR, "Missing entry name", MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ } // endif entry
+
+ strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/");
+ PlugSetPath(zbuf, options->filename, dbpath);
+ PlugSetPath(buf, fn, dbpath);
+
+ if (ZipLoadFile(g, zbuf, buf, entry, append, mul)) {
+ my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+ } // endif LoadFile
+
+ } // endif fn
+
+ } // endif zipped
+
// To check whether indexes have to be made or remade
if (!g->Xchk) {
PIXDEF xdp;
@@ -6948,10 +7045,10 @@ maria_declare_plugin(connect)
PLUGIN_LICENSE_GPL,
connect_init_func, /* Plugin Init */
connect_done_func, /* Plugin Deinit */
- 0x0104, /* version number (1.04) */
+ 0x0105, /* version number (1.05) */
NULL, /* status variables */
connect_system_variables, /* system variables */
- "1.05.0001", /* string version */
+ "1.05.0003", /* string version */
MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */
}
maria_declare_plugin_end;
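
The zipped-option handling added to ha_connect::create() can also build the archive at CREATE time from an existing file: when a Load option is present, ZipLoadFile() deflates that file into the zip before the table is used. A hedged sketch using the Load, Entry, Append and Mulentries names read via GetListOption (paths are made up):

CREATE TABLE tzl (
  name CHAR(12) NOT NULL,
  city CHAR(11) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='load.zip' ZIPPED=1
  OPTION_LIST='Load=people.csv,Entry=people.csv,Append=NO,Mulentries=NO';
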
diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp
index a69f84a94a1..c1d077406b7 100644
--- a/storage/connect/jdbconn.cpp
+++ b/storage/connect/jdbconn.cpp
@@ -1,7 +1,7 @@
/************ Jdbconn C++ Functions Source Code File (.CPP) ************/
-/* Name: JDBCONN.CPP Version 1.0 */
+/* Name: JDBCONN.CPP Version 1.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
/* This file contains the JDBC connection classes functions. */
/***********************************************************************/
@@ -45,9 +45,9 @@
#include "plgdbsem.h"
#include "xobject.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabjdbc.h"
//#include "jdbconn.h"
-//#include "plgcnx.h" // For DB types
#include "resource.h"
#include "valblk.h"
#include "osutil.h"
@@ -318,13 +318,21 @@ PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, char *colpat,
/**************************************************************************/
PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp)
{
+ char *sqry;
PQRYRES qrp;
JDBConn *jcp = new(g)JDBConn(g, NULL);
if (jcp->Open(sjp))
return NULL;
- qrp = jcp->GetMetaData(g, src);
+ if (strstr(src, "%s")) {
+		// Placeholder for an optional where clause
+ sqry = (char*)PlugSubAlloc(g, NULL, strlen(src) + 2);
+ sprintf(sqry, src, "1=1"); // dummy where clause
+ } else
+ sqry = src;
+
+ qrp = jcp->GetMetaData(g, sqry);
jcp->Close();
return qrp;
} // end of JDBCSrcCols
@@ -818,6 +826,11 @@ int JDBConn::Open(PJPARM sop)
jpop->Append(GetPluginDir());
jpop->Append("JdbcInterface.jar");
+ // All wrappers are pre-compiled in JavaWrappers.jar in the plugin dir
+ jpop->Append(sep);
+ jpop->Append(GetPluginDir());
+ jpop->Append("JavaWrappers.jar");
+
//================== prepare loading of Java VM ============================
JavaVMInitArgs vm_args; // Initialization arguments
JavaVMOption* options = new JavaVMOption[N]; // JVM invocation options
@@ -1157,6 +1170,9 @@ void JDBConn::Close()
jint rc;
jmethodID did = nullptr;
+ // Could have been detached in case of join
+ rc = jvm->AttachCurrentThread((void**)&env, nullptr);
+
if (gmID(m_G, did, "JdbcDisconnect", "()I"))
printf("%s\n", Msg);
else if (Check(env->CallIntMethod(job, did)))
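
JDBCSrcCols() now accepts a '%s' placeholder in SRCDEF and substitutes a dummy '1=1' clause when it only needs the column metadata; at execution time the WHERE (and, with the new CheckCond code, HAVING) text built from the pushed-down condition is meant to replace it. A hedged example, assuming the two-placeholder form and a made-up connection string:

CREATE TABLE jemp ENGINE=CONNECT TABLE_TYPE=JDBC
  CONNECTION='jdbc:mariadb://localhost:3306/test?user=root'
  SRCDEF='select role, count(*) nb from employees where %s group by role having %s';
SELECT * FROM jemp WHERE role = 'engineer' AND nb > 10;
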
diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp
index c45630129f1..b473871e9f7 100644
--- a/storage/connect/json.cpp
+++ b/storage/connect/json.cpp
@@ -1,7 +1,7 @@
/*************** json CPP Declares Source Code File (.H) ***************/
-/* Name: json.cpp Version 1.2 */
+/* Name: json.cpp Version 1.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */
/* */
/* This file contains the JSON classes functions. */
/***********************************************************************/
@@ -27,8 +27,33 @@
#define EL "\r\n"
#else
#define EL "\n"
+#undef SE_CATCH // Does not work for Linux
#endif
+#if defined(SE_CATCH)
+/**************************************************************************/
+/* This is the support of catching C interrupts to prevent crashes. */
+/**************************************************************************/
+#include <eh.h>
+
+class SE_Exception {
+public:
+ SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {}
+ ~SE_Exception() {}
+
+ unsigned int nSE;
+ PEXCEPTION_RECORD eRec;
+}; // end of class SE_Exception
+
+void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp)
+{
+ throw SE_Exception(u, pExp->ExceptionRecord);
+} // end of trans_func
+
+char *GetExceptionDesc(PGLOBAL g, unsigned int e);
+#endif // SE_CATCH
+
+
/***********************************************************************/
/* Parse a json string. */
/* Note: when pretty is not known, the caller set pretty to 3. */
@@ -40,6 +65,9 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
PJSON jsp = NULL;
STRG src;
+ if (trace)
+ htrc("ParseJson: s=%.10s len=%d\n", s, len);
+
if (!s || !len) {
strcpy(g->Message, "Void JSON object");
return NULL;
@@ -53,15 +81,37 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
pty[0] = false;
+
// Save stack and allocation environment and prepare error return
if (g->jump_level == MAX_JUMP) {
strcpy(g->Message, MSG(TOO_MANY_JUMPS));
return NULL;
} // endif jump_level
- if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) {
- goto err;
- } // endif rc
+#if defined(SE_CATCH)
+ // Let's try to recover from any kind of interrupt
+ _se_translator_function f = _set_se_translator(trans_func);
+
+ try {
+#endif // SE_CATCH --------------------- try section --------------------
+ if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) {
+ goto err;
+ } // endif rc
+
+#if defined(SE_CATCH) // ------------- end of try section -----------------
+ } catch (SE_Exception e) {
+ sprintf(g->Message, "ParseJson: exception doing setjmp: %s (rc=%hd)",
+ GetExceptionDesc(g, e.nSE), e.nSE);
+ _set_se_translator(f);
+ goto err;
+ } catch (...) {
+ strcpy(g->Message, "Exception doing setjmp");
+ _set_se_translator(f);
+ goto err;
+ } // end of try-catches
+
+ _set_se_translator(f);
+#endif // SE_CATCH
for (i = 0; i < len; i++)
switch (s[i]) {
@@ -140,7 +190,7 @@ tryit:
strcpy(g->Message, "More than one item in file");
err:
- g->jump_level--;
+ g->jump_level--;
return NULL;
} // end of ParseJson
@@ -390,14 +440,14 @@ char *ParseString(PGLOBAL g, int& i, STRG& src)
// if (charset == utf8) {
char xs[5];
uint hex;
-
+
xs[0] = s[++i];
xs[1] = s[++i];
xs[2] = s[++i];
xs[3] = s[++i];
xs[4] = 0;
hex = strtoul(xs, NULL, 16);
-
+
if (hex < 0x80) {
p[n] = (uchar)hex;
} else if (hex < 0x800) {
@@ -414,7 +464,7 @@ char *ParseString(PGLOBAL g, int& i, STRG& src)
} else {
char xs[3];
UINT hex;
-
+
i += 2;
xs[0] = s[++i];
xs[1] = s[++i];
@@ -468,7 +518,7 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src)
case '.':
if (!found_digit || has_dot || has_e)
goto err;
-
+
has_dot = true;
break;
case 'e':
@@ -769,7 +819,7 @@ bool JOUTSTR::Escape(const char *s)
for (unsigned int i = 0; s[i]; i++)
switch (s[i]) {
- case '"':
+ case '"':
case '\\':
case '\t':
case '\n':
@@ -1057,7 +1107,7 @@ void JARRAY::InitArray(PGLOBAL g)
int i;
PJVAL jvp, *pjvp = &First;
- for (Size = 0, jvp = First; jvp; jvp = jvp->Next)
+ for (Size = 0, jvp = First; jvp; jvp = jvp->Next)
if (!jvp->Del)
Size++;
@@ -1191,8 +1241,8 @@ bool JARRAY::IsNull(void)
/***********************************************************************/
JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON()
{
- Jsp = NULL;
- Value = AllocateValue(g, valp);
+ Jsp = NULL;
+ Value = AllocateValue(g, valp);
Next = NULL;
Del = false;
} // end of JVALUE constructor
@@ -1297,7 +1347,7 @@ PSZ JVALUE::GetText(PGLOBAL g, PSZ text)
} // end of GetText
void JVALUE::SetValue(PJSON jsp)
-{
+{
if (jsp && jsp->GetType() == TYPE_JVAL) {
Jsp = jsp->GetJsp();
Value = jsp->GetValue();
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index f9034f25739..0f693c3c0d6 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -1,6 +1,6 @@
/****************** jsonudf C++ Program Source Code File (.CPP) ******************/
-/* PROGRAM NAME: jsonudf Version 1.4 */
-/* (C) Copyright to the author Olivier BERTRAND 2015-2016 */
+/* PROGRAM NAME: jsonudf Version 1.5 */
+/* (C) Copyright to the author Olivier BERTRAND 2015-2017 */
/* This program are the JSON User Defined Functions . */
/*********************************************************************************/
@@ -242,13 +242,16 @@ my_bool JSNX::ParseJpath(PGLOBAL g)
// Jpath = Name;
return true;
- pbuf = PlugDup(g, Jpath);
+ if (!(pbuf = PlgDBDup(g, Jpath)))
+ return true;
// The Jpath must be analyzed
for (i = 0, p = pbuf; (p = strchr(p, ':')); i++, p++)
Nod++; // One path node found
- Nodes = (PJNODE)PlugSubAlloc(g, NULL, (++Nod) * sizeof(JNODE));
+ if (!(Nodes = (PJNODE)PlgDBSubAlloc(g, NULL, (++Nod) * sizeof(JNODE))))
+ return true;
+
memset(Nodes, 0, (Nod)* sizeof(JNODE));
// Analyze the Jpath for this column
@@ -1086,9 +1089,10 @@ inline void JsonFreeMem(PGLOBAL g)
/*********************************************************************************/
static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args,
char *message, my_bool mbn,
- unsigned long reslen, unsigned long memlen)
+ unsigned long reslen, unsigned long memlen,
+ unsigned long more = 0)
{
- PGLOBAL g = PlugInit(NULL, memlen);
+ PGLOBAL g = PlugInit(NULL, memlen + more);
if (!g) {
strcpy(message, "Allocation error");
@@ -1100,6 +1104,7 @@ static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args,
} // endif g
g->Mrr = (args->arg_count && args->args[0]) ? 1 : 0;
+ g->ActivityStart = (PACTIVITY)more;
initid->maybe_null = mbn;
initid->max_length = reslen;
initid->ptr = (char*)g;
@@ -1444,6 +1449,8 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
} // endif b
+ ml += (unsigned long)g->ActivityStart; // more
+
if (ml > g->Sarea_Size) {
free(g->Sarea);
@@ -2758,7 +2765,7 @@ void json_item_merge_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool json_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more;
int n = IsJson(args, 0);
if (args->arg_count < 2) {
@@ -2767,7 +2774,7 @@ my_bool json_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else if (!n && args->arg_type[0] != STRING_RESULT) {
strcpy(message, "First argument must be a json item");
return true;
- } else if (args->arg_type[1] != STRING_RESULT) {
+ } else if (args->arg_type[1] != STRING_RESULT) {
strcpy(message, "Second argument is not a string (jpath)");
return true;
} else
@@ -2780,11 +2787,13 @@ my_bool json_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
memcpy(fn, args->args[0], args->lengths[0]);
fn[args->lengths[0]] = 0;
fl = GetFileLength(fn);
- memlen += fl * 3;
- } else if (n != 3)
- memlen += args->lengths[0] * 3;
+ more = fl * 3;
+ } else if (n != 3) {
+ more = args->lengths[0] * 3;
+ } else
+ more = 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of json_get_item_init
char *json_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -2885,7 +2894,7 @@ my_bool jsonget_string_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} // endif's
CalcLen(args, false, reslen, memlen);
- memlen += more;
+//memlen += more;
if (n == 2 && args->args[0]) {
char fn[_MAX_PATH];
@@ -2894,11 +2903,11 @@ my_bool jsonget_string_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
memcpy(fn, args->args[0], args->lengths[0]);
fn[args->lengths[0]] = 0;
fl = GetFileLength(fn);
- memlen += fl * 3;
+ more += fl * 3;
} else if (n != 3)
- memlen += args->lengths[0] * 3;
+ more += args->lengths[0] * 3;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsonget_string_init
char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -2994,7 +3003,7 @@ void jsonget_string_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jsonget_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more;
if (args->arg_count != 2) {
strcpy(message, "This function must have 2 arguments");
@@ -3008,10 +3017,10 @@ my_bool jsonget_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} else
CalcLen(args, false, reslen, memlen);
- if (IsJson(args, 0) != 3)
- memlen += 1000; // TODO: calculate this
+ // TODO: calculate this
+ more = (IsJson(args, 0) != 3) ? 1000 : 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsonget_int_init
long long jsonget_int(UDF_INIT *initid, UDF_ARGS *args,
@@ -3100,7 +3109,7 @@ void jsonget_int_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jsonget_real_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more;
if (args->arg_count < 2) {
strcpy(message, "At least 2 arguments required");
@@ -3123,10 +3132,10 @@ my_bool jsonget_real_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
CalcLen(args, false, reslen, memlen);
- if (IsJson(args, 0) != 3)
- memlen += 1000; // TODO: calculate this
+ // TODO: calculate this
+ more = (IsJson(args, 0) != 3) ? 1000 : 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsonget_real_init
double jsonget_real(UDF_INIT *initid, UDF_ARGS *args,
@@ -3234,10 +3243,11 @@ my_bool jsonlocate_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
CalcLen(args, false, reslen, memlen);
- if (IsJson(args, 0) != 3)
- memlen += more; // TODO: calculate this
+ // TODO: calculate this
+ if (IsJson(args, 0) == 3)
+ more = 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsonlocate_init
char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -3358,10 +3368,11 @@ my_bool json_locate_all_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
CalcLen(args, false, reslen, memlen);
- if (IsJson(args, 0) != 3)
- memlen += more; // TODO: calculate this
+ // TODO: calculate this
+ if (IsJson(args, 0) == 3)
+ more = 0;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of json_locate_all_init
char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -3485,12 +3496,12 @@ my_bool jsoncontains_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} // endif's
CalcLen(args, false, reslen, memlen);
- memlen += more;
+//memlen += more;
- if (IsJson(args, 0) != 3)
- memlen += 1000; // TODO: calculate this
+ // TODO: calculate this
+ more += (IsJson(args, 0) != 3 ? 1000 : 0);
- return JsonInit(initid, args, message, false, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen, more);
} // end of jsoncontains_init
long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -3537,12 +3548,12 @@ my_bool jsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
} // endif's
CalcLen(args, false, reslen, memlen);
- memlen += more;
+//memlen += more;
- if (IsJson(args, 0) != 3)
- memlen += 1000; // TODO: calculate this
+ // TODO: calculate this
+ more += (IsJson(args, 0) != 3 ? 1000 : 0);
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsoncontains_path_init
long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -3736,7 +3747,7 @@ fin:
/*********************************************************************************/
my_bool json_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more = 0;
int n = IsJson(args, 0);
if (!(args->arg_count % 2)) {
@@ -3755,11 +3766,11 @@ my_bool json_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
memcpy(fn, args->args[0], args->lengths[0]);
fn[args->lengths[0]] = 0;
fl = GetFileLength(fn);
- memlen += fl * 3;
+ more += fl * 3;
} else if (n != 3)
- memlen += args->lengths[0] * 3;
+ more += args->lengths[0] * 3;
- if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ if (!JsonInit(initid, args, message, true, reslen, memlen, more)) {
PGLOBAL g = (PGLOBAL)initid->ptr;
// This is a constant function
@@ -3954,7 +3965,7 @@ void json_file_deinit(UDF_INIT* initid)
/*********************************************************************************/
my_bool jfile_make_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen, more = 1024;
+ unsigned long reslen, memlen;
if (args->arg_count < 1 || args->arg_count > 3) {
strcpy(message, "Wrong number of arguments");
@@ -4993,7 +5004,7 @@ fin:
/*********************************************************************************/
my_bool jbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
- unsigned long reslen, memlen;
+ unsigned long reslen, memlen, more = 0;
int n = IsJson(args, 0);
if (!(args->arg_count % 2)) {
@@ -5012,11 +5023,11 @@ my_bool jbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
memcpy(fn, args->args[0], args->lengths[0]);
fn[args->lengths[0]] = 0;
fl = GetFileLength(fn);
- memlen += fl * 3;
+ more = fl * 3;
} else if (n != 3)
- memlen += args->lengths[0] * 3;
+ more = args->lengths[0] * 3;
- return JsonInit(initid, args, message, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jbin_set_item_init
char *jbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -5104,8 +5115,8 @@ my_bool jbin_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
fl = GetFileLength(args->args[0]);
reslen += fl;
more += fl * M;
- memlen += more;
- return JsonInit(initid, args, message, true, reslen, memlen);
+//memlen += more;
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jbin_file_init
char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
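
Aside from the diff: the jsonudf.cpp hunks above all apply one pattern, sketched below as a small standalone C++ program. Extra workspace that the init functions used to fold into memlen is now accumulated in a separate more value and passed to JsonInit as an additional argument. JsonInit itself is not shown in this diff, so MockJsonInit and MOCK_UDF_INIT below are hypothetical stand-ins; the only assumption made is that the helper adds more to the work area it allocates.

#include <cstdio>

struct MOCK_UDF_INIT { void *ptr; };          // stand-in for the real UDF_INIT

// Hypothetical stand-in for JsonInit(initid, args, message, ..., reslen, memlen, more)
static bool MockJsonInit(MOCK_UDF_INIT *initid, unsigned long reslen,
                         unsigned long memlen, unsigned long more)
{
  unsigned long work = memlen + more;         // assumed sizing of the work area
  std::printf("result length %lu, work area %lu bytes\n", reslen, work);
  initid->ptr = nullptr;                      // the real helper suballocates a PGLOBAL here
  return false;                               // false = success in the UDF init protocol
}

int main()
{
  MOCK_UDF_INIT initid;
  unsigned long reslen = 0, memlen = 2048, more = 0;

  // Mirrors jsoncontains_init above: when argument 0 is not a json file
  // (IsJson(args, 0) != 3), a rough extra 1000 bytes is requested via 'more'.
  more += 1000;
  return MockJsonInit(&initid, reslen, memlen, more) ? 1 : 0;
}

The same shape appears in json_set_item_init and jbin_set_item_init above, where the file length or argument length times three is now routed through more instead of being added to memlen.
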
diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc
index 497fe5e1aa8..30ac7613dd6 100644
--- a/storage/connect/mycat.cc
+++ b/storage/connect/mycat.cc
@@ -1,4 +1,4 @@
-/* Copyright (C) Olivier Bertrand 2004 - 2016
+/* Copyright (C) Olivier Bertrand 2004 - 2017
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,9 +16,9 @@
/*************** Mycat CC Program Source Code File (.CC) ***************/
/* PROGRAM NAME: MYCAT */
/* ------------- */
-/* Version 1.5 */
+/* Version 1.6 */
/* */
-/* Author: Olivier Bertrand 2012 - 2016 */
+/* Author: Olivier Bertrand 2012 - 2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -58,9 +58,10 @@
#endif // UNIX
#include "global.h"
#include "plgdbsem.h"
-#include "reldef.h"
-#include "tabcol.h"
+//#include "reldef.h"
#include "xtable.h"
+#include "tabext.h"
+#include "tabcol.h"
#include "filamtxt.h"
#include "tabdos.h"
#include "tabfmt.h"
@@ -559,13 +560,13 @@ PRELDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am)
case TAB_XML: tdp= new(g) XMLDEF; break;
#endif // XML_SUPPORT
#if defined(VCT_SUPPORT)
- case TAB_VEC: tdp = new(g)VCTDEF; break;
+ case TAB_VEC: tdp = new(g) VCTDEF; break;
#endif // VCT_SUPPORT
#if defined(ODBC_SUPPORT)
case TAB_ODBC: tdp= new(g) ODBCDEF; break;
#endif // ODBC_SUPPORT
#if defined(JDBC_SUPPORT)
- case TAB_JDBC: tdp= new(g)JDBCDEF; break;
+ case TAB_JDBC: tdp= new(g) JDBCDEF; break;
#endif // JDBC_SUPPORT
#if defined(__WIN__)
case TAB_MAC: tdp= new(g) MACDEF; break;
diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp
index 644ca019e4a..d05254a32a6 100644
--- a/storage/connect/myconn.cpp
+++ b/storage/connect/myconn.cpp
@@ -1,11 +1,11 @@
/************** MyConn C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: MYCONN */
/* ------------- */
-/* Version 1.8 */
+/* Version 1.9 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2007-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2007-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -375,10 +375,18 @@ PQRYRES SrcColumns(PGLOBAL g, const char *host, const char *db,
if (!port)
port = mysqld_port;
- if (!strnicmp(srcdef, "select ", 7)) {
- query = (char *)PlugSubAlloc(g, NULL, strlen(srcdef) + 9);
- strcat(strcpy(query, srcdef), " LIMIT 0");
- } else
+ if (!strnicmp(srcdef, "select ", 7) || strstr(srcdef, "%s")) {
+ query = (char *)PlugSubAlloc(g, NULL, strlen(srcdef) + 10);
+
+ if (strstr(srcdef, "%s"))
+ sprintf(query, srcdef, "1=1"); // dummy where clause
+ else
+ strcpy(query, srcdef);
+
+ if (!strnicmp(srcdef, "select ", 7))
+ strcat(query, " LIMIT 0");
+
+ } else
query = (char *)srcdef;
// Open a MySQL connection for this table
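
Aside from the diff: a self-contained C++ sketch of the SRCDEF handling that SrcColumns gains above. A source definition may now carry a "%s" placeholder; for column discovery the engine substitutes a dummy 1=1 condition there and, when the definition is a SELECT, appends LIMIT 0 so only metadata is fetched. ExpandSrcdef and its std::string return are purely illustrative simplifications of the PlugSubAlloc buffer handling in the real code, and the case-sensitive prefix test stands in for strnicmp.

#include <cstdio>
#include <cstring>
#include <string>

// Hypothetical helper mirroring the logic added to SrcColumns above.
static std::string ExpandSrcdef(const char *srcdef)
{
  char buf[1024];
  std::string query;

  if (std::strstr(srcdef, "%s")) {
    std::snprintf(buf, sizeof(buf), srcdef, "1=1");  // dummy where clause
    query = buf;
  } else
    query = srcdef;

  if (!std::strncmp(srcdef, "select ", 7))           // real code compares case-insensitively
    query += " LIMIT 0";                             // metadata only, no rows

  return query;
}

int main()
{
  std::printf("%s\n", ExpandSrcdef("select * from emp where %s").c_str());
  // prints: select * from emp where 1=1 LIMIT 0
  return 0;
}

ODBCSrcCols in odbconn.cpp further down gets the same treatment, and TDBEXT::MakeSQL in the new tabext.cpp substitutes the real WHERE (and optionally HAVING) condition at query time.
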
diff --git a/storage/connect/mysql-test/connect/r/xml_zip.result b/storage/connect/mysql-test/connect/r/xml_zip.result
new file mode 100644
index 00000000000..f176149c53f
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/xml_zip.result
@@ -0,0 +1,98 @@
+Warnings:
+Warning 1105 No file name. Table will use t1.xml
+#
+# Testing zipped XML tables
+#
+CREATE TABLE t1 (
+ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
+LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
+SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+TITLE CHAR(30) NOT NULL,
+PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+DATEPUB CHAR(4) NOT NULL
+) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
+OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
+SELECT * FROM t1;
+ISBN 9782212090819
+LANG fr
+SUBJECT applications
+AUTHOR_FIRSTNAME Jean-Christophe
+AUTHOR_LASTNAME Bernadac
+TRANSLATOR_PREFIX NULL
+TRANSLATOR_FIRSTNAME NULL
+TRANSLATOR_LASTNAME NULL
+TITLE Construire une application XML
+PUBLISHER_NAME Eyrolles
+PUBLISHER_PLACE Paris
+DATEPUB 1999
+ISBN 9782212090819
+LANG fr
+SUBJECT applications
+AUTHOR_FIRSTNAME François
+AUTHOR_LASTNAME Knab
+TRANSLATOR_PREFIX NULL
+TRANSLATOR_FIRSTNAME NULL
+TRANSLATOR_LASTNAME NULL
+TITLE Construire une application XML
+PUBLISHER_NAME Eyrolles
+PUBLISHER_PLACE Paris
+DATEPUB 1999
+ISBN 9782840825685
+LANG fr
+SUBJECT applications
+AUTHOR_FIRSTNAME William J.
+AUTHOR_LASTNAME Pardi
+TRANSLATOR_PREFIX adapté de l'anglais par
+TRANSLATOR_FIRSTNAME James
+TRANSLATOR_LASTNAME Guerin
+TITLE XML en Action
+PUBLISHER_NAME Microsoft Press
+PUBLISHER_PLACE Paris
+DATEPUB 1999
+ISBN 9782212090529
+LANG fr
+SUBJECT général
+AUTHOR_FIRSTNAME Alain
+AUTHOR_LASTNAME Michard
+TRANSLATOR_PREFIX NULL
+TRANSLATOR_FIRSTNAME NULL
+TRANSLATOR_LASTNAME NULL
+TITLE XML, Langage et Applications
+PUBLISHER_NAME Eyrolles
+PUBLISHER_PLACE Paris
+DATEPUB 2003
+CREATE TABLE t2
+ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
+OPTION_LIST='xmlsup=libxml2';
+SELECT * FROM t2;
+ISBN 9782212090819
+LANG fr
+SUBJECT applications
+AUTHOR Jean-Christophe Bernadac
+TRANSLATOR NULL
+TITLE Construire une application XML
+PUBLISHER Eyrolles Paris
+DATEPUB 1999
+ISBN 9782840825685
+LANG fr
+SUBJECT applications
+AUTHOR William J. Pardi
+TRANSLATOR James Guerin
+TITLE XML en Action
+PUBLISHER Microsoft Press Paris
+DATEPUB 1999
+ISBN 9782212090529
+LANG fr
+SUBJECT général
+AUTHOR Alain Michard
+TRANSLATOR NULL
+TITLE XML, Langage et Applications
+PUBLISHER Eyrolles Paris
+DATEPUB 2003
+DROP TABLE t1,t2;
diff --git a/storage/connect/mysql-test/connect/r/zip.result b/storage/connect/mysql-test/connect/r/zip.result
new file mode 100644
index 00000000000..c03b27bd428
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/zip.result
@@ -0,0 +1,240 @@
+#
+# Testing zipped DOS tables
+#
+CREATE TABLE t1 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='ENTRY=new1.dos' ZIPPED=1;
+INSERT INTO t1 VALUES(1,'One'),(2,'Two'),(3,'Three'),(4,'Four'),(5,'Five'),(6,'Six'),(7,'Seven'),(8,'Eight'),(9,'Nine'),(10,'Ten');
+SELECT * FROM t1;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+CREATE TABLE t2 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='ENTRY=new2.dos,APPEND=1' ZIPPED=1;
+INSERT INTO t2 VALUES(11,'Eleven'),(12,'Twelve'),(13,'Thirteen'),(14,'Fourteen'),(15,'Fiften'),(16,'Sixteen'),(17,'Seventeen'),(18,'Eighteen'),(19,'Nineteen'),(20,'Twenty');
+SELECT * FROM t2;
+digit letter
+11 Eleven
+12 Twelve
+13 Thirteen
+14 Fourteen
+15 Fiften
+16 Sixteen
+17 Seventeen
+18 Eighteen
+19 Nineteen
+20 Twenty
+CREATE TABLE t3 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='MULENTRIES=1' ZIPPED=1;
+SELECT * FROM t3;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+11 Eleven
+12 Twelve
+13 Thirteen
+14 Fourteen
+15 Fiften
+16 Sixteen
+17 Seventeen
+18 Eighteen
+19 Nineteen
+20 Twenty
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newdos.zip';
+SELECT * FROM t4;
+fn cmpsize uncsize method
+new1.dos 67 79 8
+new2.dos 77 112 8
+DROP TABLE t1,t2,t3,t4;
+#
+# Testing zipped CSV tables
+#
+CREATE TABLE t1 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new1.csv' HEADER=1 ZIPPED=1;
+INSERT INTO t1 VALUES(1,'One'),(2,'Two'),(3,'Three'),(4,'Four'),(5,'Five'),(6,'Six'),(7,'Seven'),(8,'Eight'),(9,'Nine'),(10,'Ten');
+SELECT * FROM t1;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+CREATE TABLE td1
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new1.csv' HEADER=1 ZIPPED=1;
+SELECT * FROM td1;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+DROP TABLE td1;
+CREATE TABLE t2 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new2.csv,APPEND=1' HEADER=1 ZIPPED=1;
+INSERT INTO t2 VALUES(11,'Eleven'),(12,'Twelve'),(13,'Thirteen'),(14,'Fourteen'),(15,'Fiften'),(16,'Sixteen'),(17,'Seventeen'),(18,'Eighteen'),(19,'Nineteen'),(20,'Twenty');
+SELECT * FROM t2;
+digit letter
+11 Eleven
+12 Twelve
+13 Thirteen
+14 Fourteen
+15 Fiften
+16 Sixteen
+17 Seventeen
+18 Eighteen
+19 Nineteen
+20 Twenty
+CREATE TABLE t3
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='MULENTRIES=1' HEADER=1 ZIPPED=1;
+SELECT * FROM t3;
+digit letter
+1 One
+2 Two
+3 Three
+4 Four
+5 Five
+6 Six
+7 Seven
+8 Eight
+9 Nine
+10 Ten
+11 Eleven
+12 Twelve
+13 Thirteen
+14 Fourteen
+15 Fiften
+16 Sixteen
+17 Seventeen
+18 Eighteen
+19 Nineteen
+20 Twenty
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newcsv.zip';
+SELECT * FROM t4;
+fn cmpsize uncsize method
+new1.csv 79 83 8
+new2.csv 94 125 8
+DROP TABLE t1,t2,t3,t4;
+#
+# Testing zipped JSON tables
+#
+CREATE TABLE t1 (
+_id INT(2) NOT NULL,
+name_first CHAR(9) NOT NULL FIELD_FORMAT='name:first',
+name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='name:aka',
+name_last CHAR(10) NOT NULL FIELD_FORMAT='name:last',
+title CHAR(12) DEFAULT NULL,
+birth CHAR(20) DEFAULT NULL,
+death CHAR(20) DEFAULT NULL,
+contribs CHAR(7) NOT NULL FIELD_FORMAT='contribs:',
+awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='awards::award',
+awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='awards::year',
+awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='awards::by'
+) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES;
+SELECT * FROM t1;
+_id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by
+1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society
+2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp Turing Award 1971 ACM
+3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC Computer Sciences Man of the Year 1969 Data Processing Management Association
+4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+6 Guido NULL van Rossum NULL 1956-01-31T05:00:00Z NULL Python Award for the Advancement of Free Software 2001 Free Software Foundation
+7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX Turing Award 1983 ACM
+8 Yukihiro Matz Matsumoto NULL 1965-04-14T04:00:00Z NULL Ruby Award for the Advancement of Free Software 2011 Free Software Foundation
+9 James NULL Gosling NULL 1955-05-19T04:00:00Z NULL Java The Economist Innovation Award 2002 The Economist
+10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL
+CREATE TABLE t2
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1
+OPTION_LIST='LEVEL=5';
+SELECT * FROM t2;
+_id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by
+1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society
+2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp Turing Award 1971 ACM
+3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC Computer Sciences Man of the Year 1969 Data Processing Management Association
+4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+6 Guido NULL van Rossum NULL 1956-01-31T05:00:00Z NULL Python Award for the Advancement of Free Software 2001 Free Software Foundation
+7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX Turing Award 1983 ACM
+8 Yukihiro Matz Matsumoto NULL 1965-04-14T04:00:00Z NULL Ruby Award for the Advancement of Free Software 2011 Free Software Foundation
+9 James NULL Gosling NULL 1955-05-19T04:00:00Z NULL Java The Economist Innovation Award 2002 The Economist
+10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL
+CREATE TABLE t3 (
+_id INT(2) NOT NULL,
+firstname CHAR(9) NOT NULL FIELD_FORMAT='name:first',
+aka CHAR(4) DEFAULT NULL FIELD_FORMAT='name:aka',
+lastname CHAR(10) NOT NULL FIELD_FORMAT='name:last',
+title CHAR(12) DEFAULT NULL,
+birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
+death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
+contribs CHAR(64) NOT NULL FIELD_FORMAT='contribs:[", "]',
+award CHAR(42) DEFAULT NULL FIELD_FORMAT='awards:[x]:award',
+year CHAR(4) DEFAULT NULL FIELD_FORMAT='awards:[x]:year',
+`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='awards:[x]:by'
+) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES;
+SELECT * FROM t3 WHERE _id = 1;
+_id firstname aka lastname title birth death contribs award year by
+1 John NULL Backus NULL 1924-03-12 2008-05-03 Fortran, ALGOL, Backus-Naur Form, FP W.W. McDowell Award 1967 IEEE Computer Society
+1 John NULL Backus NULL 1924-03-12 2008-05-03 Fortran, ALGOL, Backus-Naur Form, FP National Medal of Science 1975 National Science Foundation
+1 John NULL Backus NULL 1924-03-12 2008-05-03 Fortran, ALGOL, Backus-Naur Form, FP Turing Award 1977 ACM
+1 John NULL Backus NULL 1924-03-12 2008-05-03 Fortran, ALGOL, Backus-Naur Form, FP Draper Prize 1993 National Academy of Engineering
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='bios.zip';
+SELECT * FROM t4;
+fn cmpsize uncsize method
+bios.json 1096 6848 8
+DROP TABLE t1,t2,t3,t4;
diff --git a/storage/connect/mysql-test/connect/std_data/bios.json b/storage/connect/mysql-test/connect/std_data/bios.json
new file mode 100644
index 00000000000..85e4ecb933f
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/bios.json
@@ -0,0 +1,273 @@
+[
+ {
+ "_id" : 1,
+ "name" : {
+ "first" : "John",
+ "last" : "Backus"
+ },
+ "birth" : "1924-12-03T05:00:00Z",
+ "death" : "2007-03-17T04:00:00Z",
+ "contribs" : [
+ "Fortran",
+ "ALGOL",
+ "Backus-Naur Form",
+ "FP"
+ ],
+ "awards" : [
+ {
+ "award" : "W.W. McDowell Award",
+ "year" : 1967,
+ "by" : "IEEE Computer Society"
+ },
+ {
+ "award" : "National Medal of Science",
+ "year" : 1975,
+ "by" : "National Science Foundation"
+ },
+ {
+ "award" : "Turing Award",
+ "year" : 1977,
+ "by" : "ACM"
+ },
+ {
+ "award" : "Draper Prize",
+ "year" : 1993,
+ "by" : "National Academy of Engineering"
+ }
+ ]
+ },
+ {
+ "_id" : 2,
+ "name" : {
+ "first" : "John",
+ "last" : "McCarthy"
+ },
+ "birth" : "1927-09-04T04:00:00Z",
+ "death" : "2011-12-24T05:00:00Z",
+ "contribs" : [
+ "Lisp",
+ "Artificial Intelligence",
+ "ALGOL"
+ ],
+ "awards" : [
+ {
+ "award" : "Turing Award",
+ "year" : 1971,
+ "by" : "ACM"
+ },
+ {
+ "award" : "Kyoto Prize",
+ "year" : 1988,
+ "by" : "Inamori Foundation"
+ },
+ {
+ "award" : "National Medal of Science",
+ "year" : 1990,
+ "by" : "National Science Foundation"
+ }
+ ]
+ },
+ {
+ "_id" : 3,
+ "name" : {
+ "first" : "Grace",
+ "last" : "Hopper"
+ },
+ "title" : "Rear Admiral",
+ "birth" : "1906-12-09T05:00:00Z",
+ "death" : "1992-01-01T05:00:00Z",
+ "contribs" : [
+ "UNIVAC",
+ "compiler",
+ "FLOW-MATIC",
+ "COBOL"
+ ],
+ "awards" : [
+ {
+ "award" : "Computer Sciences Man of the Year",
+ "year" : 1969,
+ "by" : "Data Processing Management Association"
+ },
+ {
+ "award" : "Distinguished Fellow",
+ "year" : 1973,
+ "by" : " British Computer Society"
+ },
+ {
+ "award" : "W. W. McDowell Award",
+ "year" : 1976,
+ "by" : "IEEE Computer Society"
+ },
+ {
+ "award" : "National Medal of Technology",
+ "year" : 1991,
+ "by" : "United States"
+ }
+ ]
+ },
+ {
+ "_id" : 4,
+ "name" : {
+ "first" : "Kristen",
+ "last" : "Nygaard"
+ },
+ "birth" : "1926-08-27T04:00:00Z",
+ "death" : "2002-08-10T04:00:00Z",
+ "contribs" : [
+ "OOP",
+ "Simula"
+ ],
+ "awards" : [
+ {
+ "award" : "Rosing Prize",
+ "year" : 1999,
+ "by" : "Norwegian Data Association"
+ },
+ {
+ "award" : "Turing Award",
+ "year" : 2001,
+ "by" : "ACM"
+ },
+ {
+ "award" : "IEEE John von Neumann Medal",
+ "year" : 2001,
+ "by" : "IEEE"
+ }
+ ]
+ },
+ {
+ "_id" : 5,
+ "name" : {
+ "first" : "Ole-Johan",
+ "last" : "Dahl"
+ },
+ "birth" : "1931-10-12T04:00:00Z",
+ "death" : "2002-06-29T04:00:00Z",
+ "contribs" : [
+ "OOP",
+ "Simula"
+ ],
+ "awards" : [
+ {
+ "award" : "Rosing Prize",
+ "year" : 1999,
+ "by" : "Norwegian Data Association"
+ },
+ {
+ "award" : "Turing Award",
+ "year" : 2001,
+ "by" : "ACM"
+ },
+ {
+ "award" : "IEEE John von Neumann Medal",
+ "year" : 2001,
+ "by" : "IEEE"
+ }
+ ]
+ },
+ {
+ "_id" : 6,
+ "name" : {
+ "first" : "Guido",
+ "last" : "van Rossum"
+ },
+ "birth" : "1956-01-31T05:00:00Z",
+ "contribs" : [
+ "Python"
+ ],
+ "awards" : [
+ {
+ "award" : "Award for the Advancement of Free Software",
+ "year" : 2001,
+ "by" : "Free Software Foundation"
+ },
+ {
+ "award" : "NLUUG Award",
+ "year" : 2003,
+ "by" : "NLUUG"
+ }
+ ]
+ },
+ {
+ "_id" : 7,
+ "name" : {
+ "first" : "Dennis",
+ "last" : "Ritchie"
+ },
+ "birth" : "1941-09-09T04:00:00Z",
+ "death" : "2011-10-12T04:00:00Z",
+ "contribs" : [
+ "UNIX",
+ "C"
+ ],
+ "awards" : [
+ {
+ "award" : "Turing Award",
+ "year" : 1983,
+ "by" : "ACM"
+ },
+ {
+ "award" : "National Medal of Technology",
+ "year" : 1998,
+ "by" : "United States"
+ },
+ {
+ "award" : "Japan Prize",
+ "year" : 2011,
+ "by" : "The Japan Prize Foundation"
+ }
+ ]
+ },
+ {
+ "_id" : 8,
+ "name" : {
+ "first" : "Yukihiro",
+ "aka" : "Matz",
+ "last" : "Matsumoto"
+ },
+ "birth" : "1965-04-14T04:00:00Z",
+ "contribs" : [
+ "Ruby"
+ ],
+ "awards" : [
+ {
+ "award" : "Award for the Advancement of Free Software",
+ "year" : "2011",
+ "by" : "Free Software Foundation"
+ }
+ ]
+ },
+ {
+ "_id" : 9,
+ "name" : {
+ "first" : "James",
+ "last" : "Gosling"
+ },
+ "birth" : "1955-05-19T04:00:00Z",
+ "contribs" : [
+ "Java"
+ ],
+ "awards" : [
+ {
+ "award" : "The Economist Innovation Award",
+ "year" : 2002,
+ "by" : "The Economist"
+ },
+ {
+ "award" : "Officer of the Order of Canada",
+ "year" : 2007,
+ "by" : "Canada"
+ }
+ ]
+ },
+ {
+ "_id" : 10,
+ "name" : {
+ "first" : "Martin",
+ "last" : "Odersky"
+ },
+ "contribs" : [
+ "Scala"
+ ]
+ }
+]
diff --git a/storage/connect/mysql-test/connect/std_data/xsample2.xml b/storage/connect/mysql-test/connect/std_data/xsample2.xml
new file mode 100644
index 00000000000..35295844370
--- /dev/null
+++ b/storage/connect/mysql-test/connect/std_data/xsample2.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<BIBLIO SUBJECT="XML">
+ <BOOK ISBN="9782212090819" LANG="fr" SUBJECT="applications">
+ <AUTHOR>
+ <FIRSTNAME>Jean-Christophe</FIRSTNAME>
+ <LASTNAME>Bernadac</LASTNAME>
+ </AUTHOR>
+ <AUTHOR>
+ <FIRSTNAME>François</FIRSTNAME>
+ <LASTNAME>Knab</LASTNAME>
+ </AUTHOR>
+ <TITLE>Construire une application XML</TITLE>
+ <PUBLISHER>
+ <NAME>Eyrolles</NAME>
+ <PLACE>Paris</PLACE>
+ </PUBLISHER>
+ <DATEPUB>1999</DATEPUB>
+ </BOOK>
+ <BOOK ISBN="9782840825685" LANG="fr" SUBJECT="applications">
+ <AUTHOR>
+ <FIRSTNAME>William J.</FIRSTNAME>
+ <LASTNAME>Pardi</LASTNAME>
+ </AUTHOR>
+ <TRANSLATOR PREFIX="adapté de l'anglais par">
+ <FIRSTNAME>James</FIRSTNAME>
+ <LASTNAME>Guerin</LASTNAME>
+ </TRANSLATOR>
+ <TITLE>XML en Action</TITLE>
+ <PUBLISHER>
+ <NAME>Microsoft Press</NAME>
+ <PLACE>Paris</PLACE>
+ </PUBLISHER>
+ <DATEPUB>1999</DATEPUB>
+ </BOOK>
+ <BOOK ISBN="9782212090529" LANG="fr" SUBJECT="général">
+ <AUTHOR>
+ <FIRSTNAME>Alain</FIRSTNAME>
+ <LASTNAME>Michard</LASTNAME>
+ </AUTHOR>
+ <TITLE>XML, Langage et Applications</TITLE>
+ <PUBLISHER>
+ <NAME>Eyrolles</NAME>
+ <PLACE>Paris</PLACE>
+ </PUBLISHER>
+ <DATEPUB>2003</DATEPUB>
+ </BOOK>
+</BIBLIO>
diff --git a/storage/connect/mysql-test/connect/t/have_zip.inc b/storage/connect/mysql-test/connect/t/have_zip.inc
new file mode 100644
index 00000000000..d1283fc1d38
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/have_zip.inc
@@ -0,0 +1,19 @@
+--disable_query_log
+--error 0,ER_UNKNOWN_ERROR
+CREATE TABLE t1 (a CHAR(10)) ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='test.zip';
+if ($mysql_errno)
+{
+ Skip No ZIP support;
+}
+#if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES
+# WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
+# AND ENGINE='CONNECT'
+# AND CREATE_OPTIONS LIKE '%`table_type`=ZIP%'
+# AND CREATE OPTIONS LIKE "%`file_name`='test.zip'%"`)
+#{
+# DROP TABLE IF EXISTS t1;
+# Skip Need ZIP support;
+#}
+DROP TABLE t1;
+--enable_query_log
+
diff --git a/storage/connect/mysql-test/connect/t/xml_zip.test b/storage/connect/mysql-test/connect/t/xml_zip.test
new file mode 100644
index 00000000000..d8c7894f861
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/xml_zip.test
@@ -0,0 +1,41 @@
+--source have_zip.inc
+--source have_libxml2.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--vertical_results
+
+--copy_file $MTR_SUITE_DIR/std_data/xsample2.xml $MYSQLD_DATADIR/test/xsample2.xml
+
+--echo #
+--echo # Testing zipped XML tables
+--echo #
+CREATE TABLE t1 (
+ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
+LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
+SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+TITLE CHAR(30) NOT NULL,
+PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+DATEPUB CHAR(4) NOT NULL
+) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
+OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
+SELECT * FROM t1;
+
+#testing discovery
+CREATE TABLE t2
+ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
+OPTION_LIST='xmlsup=libxml2';
+SELECT * FROM t2;
+DROP TABLE t1,t2;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/xsample2.xml
+--remove_file $MYSQLD_DATADIR/test/xsample2.zip
diff --git a/storage/connect/mysql-test/connect/t/zip.test b/storage/connect/mysql-test/connect/t/zip.test
new file mode 100644
index 00000000000..a4892e9ed4e
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/zip.test
@@ -0,0 +1,136 @@
+--source have_zip.inc
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--copy_file $MTR_SUITE_DIR/std_data/bios.json $MYSQLD_DATADIR/test/bios.json
+
+--echo #
+--echo # Testing zipped DOS tables
+--echo #
+CREATE TABLE t1 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='ENTRY=new1.dos' ZIPPED=1;
+INSERT INTO t1 VALUES(1,'One'),(2,'Two'),(3,'Three'),(4,'Four'),(5,'Five'),(6,'Six'),(7,'Seven'),(8,'Eight'),(9,'Nine'),(10,'Ten');
+SELECT * FROM t1;
+
+CREATE TABLE t2 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='ENTRY=new2.dos,APPEND=1' ZIPPED=1;
+INSERT INTO t2 VALUES(11,'Eleven'),(12,'Twelve'),(13,'Thirteen'),(14,'Fourteen'),(15,'Fiften'),(16,'Sixteen'),(17,'Seventeen'),(18,'Eighteen'),(19,'Nineteen'),(20,'Twenty');
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='newdos.zip'
+OPTION_LIST='MULENTRIES=1' ZIPPED=1;
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newdos.zip';
+SELECT * FROM t4;
+DROP TABLE t1,t2,t3,t4;
+
+--echo #
+--echo # Testing zipped CSV tables
+--echo #
+CREATE TABLE t1 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new1.csv' HEADER=1 ZIPPED=1;
+INSERT INTO t1 VALUES(1,'One'),(2,'Two'),(3,'Three'),(4,'Four'),(5,'Five'),(6,'Six'),(7,'Seven'),(8,'Eight'),(9,'Nine'),(10,'Ten');
+SELECT * FROM t1;
+
+# Test discovery
+CREATE TABLE td1
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new1.csv' HEADER=1 ZIPPED=1;
+SELECT * FROM td1;
+DROP TABLE td1;
+
+CREATE TABLE t2 (
+digit INT(3) NOT NULL,
+letter CHAR(16) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='ENTRY=new2.csv,APPEND=1' HEADER=1 ZIPPED=1;
+INSERT INTO t2 VALUES(11,'Eleven'),(12,'Twelve'),(13,'Thirteen'),(14,'Fourteen'),(15,'Fiften'),(16,'Sixteen'),(17,'Seventeen'),(18,'Eighteen'),(19,'Nineteen'),(20,'Twenty');
+SELECT * FROM t2;
+
+CREATE TABLE t3
+ENGINE=CONNECT TABLE_TYPE=CSV FILE_NAME='newcsv.zip'
+OPTION_LIST='MULENTRIES=1' HEADER=1 ZIPPED=1;
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='newcsv.zip';
+SELECT * FROM t4;
+DROP TABLE t1,t2,t3,t4;
+
+--echo #
+--echo # Testing zipped JSON tables
+--echo #
+CREATE TABLE t1 (
+_id INT(2) NOT NULL,
+name_first CHAR(9) NOT NULL FIELD_FORMAT='name:first',
+name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='name:aka',
+name_last CHAR(10) NOT NULL FIELD_FORMAT='name:last',
+title CHAR(12) DEFAULT NULL,
+birth CHAR(20) DEFAULT NULL,
+death CHAR(20) DEFAULT NULL,
+contribs CHAR(7) NOT NULL FIELD_FORMAT='contribs:',
+awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='awards::award',
+awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='awards::year',
+awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='awards::by'
+) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES;
+SELECT * FROM t1;
+
+# Test discovery
+CREATE TABLE t2
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1
+OPTION_LIST='LEVEL=5';
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+_id INT(2) NOT NULL,
+firstname CHAR(9) NOT NULL FIELD_FORMAT='name:first',
+aka CHAR(4) DEFAULT NULL FIELD_FORMAT='name:aka',
+lastname CHAR(10) NOT NULL FIELD_FORMAT='name:last',
+title CHAR(12) DEFAULT NULL,
+birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
+death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
+contribs CHAR(64) NOT NULL FIELD_FORMAT='contribs:[", "]',
+award CHAR(42) DEFAULT NULL FIELD_FORMAT='awards:[x]:award',
+year CHAR(4) DEFAULT NULL FIELD_FORMAT='awards:[x]:year',
+`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='awards:[x]:by'
+) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES;
+SELECT * FROM t3 WHERE _id = 1;
+
+CREATE TABLE t4 (
+fn VARCHAR(256)NOT NULL,
+cmpsize BIGINT NOT NULL FLAG=1,
+uncsize BIGINT NOT NULL FLAG=2,
+method INT NOT NULL FLAG=3)
+ENGINE=CONNECT TABLE_TYPE=ZIP FILE_NAME='bios.zip';
+SELECT * FROM t4;
+DROP TABLE t1,t2,t3,t4;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/newdos.zip
+--remove_file $MYSQLD_DATADIR/test/newcsv.zip
+--remove_file $MYSQLD_DATADIR/test/bios.zip
+--remove_file $MYSQLD_DATADIR/test/bios.json
+
diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp
index 7320f4cc1d9..433e392eace 100644
--- a/storage/connect/odbconn.cpp
+++ b/storage/connect/odbconn.cpp
@@ -35,8 +35,8 @@
#include "global.h"
#include "plgdbsem.h"
#include "xobject.h"
-//#include "kindex.h"
#include "xtable.h"
+#include "tabext.h"
#include "odbccat.h"
#include "tabodbc.h"
#include "plgcnx.h" // For DB types
@@ -413,12 +413,20 @@ PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table,
/**************************************************************************/
PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src, POPARM sop)
{
+ char *sqry;
ODBConn *ocp = new(g) ODBConn(g, NULL);
if (ocp->Open(dsn, sop, 10) < 1) // openReadOnly + noOdbcDialog
return NULL;
- return ocp->GetMetaData(g, dsn, src);
+ if (strstr(src, "%s")) {
+ // Placeholder for a possible where clause
+ sqry = (char*)PlugSubAlloc(g, NULL, strlen(src) + 3);
+ sprintf(sqry, src, "1=1", "1=1"); // dummy where clause
+ } else
+ sqry = src;
+
+ return ocp->GetMetaData(g, dsn, sqry);
} // end of ODBCSrcCols
#if 0
@@ -1417,7 +1425,7 @@ int ODBConn::ExecDirectSQL(char *sql, ODBCCOL *tocols)
b = true;
if (trace)
- htrc("ExecDirect hstmt=%p %.64s\n", hstmt, sql);
+ htrc("ExecDirect hstmt=%p %.256s\n", hstmt, sql);
if (m_Tdb->Srcdef) {
// Be sure this is a query returning a result set
diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h
index cb408494319..800b1098d50 100644
--- a/storage/connect/plgdbsem.h
+++ b/storage/connect/plgdbsem.h
@@ -1,7 +1,7 @@
/************** PlgDBSem H Declares Source Code File (.H) **************/
/* Name: PLGDBSEM.H Version 3.7 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* This file contains the CONNECT storage engine definitions. */
/***********************************************************************/
@@ -57,7 +57,7 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */
TAB_FIX = 2, /* Fixed column offset, fixed LRECL */
TAB_BIN = 3, /* Like FIX but can have binary fields */
TAB_CSV = 4, /* DOS files with CSV records */
- TAB_FMT = 5, /* DOS files with formatted recordss */
+ TAB_FMT = 5, /* DOS files with formatted records */
TAB_DBF = 6, /* DBF Dbase or Foxpro files */
TAB_XML = 7, /* XML or HTML files */
TAB_INI = 8, /* INI or CFG files */
@@ -212,11 +212,24 @@ enum OPVAL {OP_EQ = 1, /* Filtering operator = */
OP_SUB = 17, /* Expression Substract operator */
OP_MULT = 18, /* Expression Multiply operator */
OP_DIV = 19, /* Expression Divide operator */
- OP_NOP = 21, /* Scalar function is nopped */
OP_NUM = 22, /* Scalar function Op Num */
- OP_ABS = 23, /* Scalar function Op Abs */
OP_MAX = 24, /* Scalar function Op Max */
OP_MIN = 25, /* Scalar function Op Min */
+ OP_EXP = 36, /* Scalar function Op Exp */
+ OP_FDISK = 94, /* Operator Disk of fileid */
+ OP_FPATH = 95, /* Operator Path of fileid */
+ OP_FNAME = 96, /* Operator Name of fileid */
+ OP_FTYPE = 97, /* Operator Type of fileid */
+ OP_LAST = 82, /* Index operator Find Last */
+ OP_FIRST = 106, /* Index operator Find First */
+ OP_NEXT = 107, /* Index operator Find Next */
+ OP_SAME = 108, /* Index operator Find Next Same */
+ OP_FSTDIF = 109, /* Index operator Find First dif */
+ OP_NXTDIF = 110, /* Index operator Find Next dif */
+ OP_PREV = 116}; /* Index operator Find Previous */
+#if 0
+ OP_NOP = 21, /* Scalar function is nopped */
+ OP_ABS = 23, /* Scalar function Op Abs */
OP_CEIL = 26, /* Scalar function Op Ceil */
OP_FLOOR = 27, /* Scalar function Op Floor */
OP_MOD = 28, /* Scalar function Op Mod */
@@ -312,6 +325,7 @@ enum OPVAL {OP_EQ = 1, /* Filtering operator = */
OP_REMOVE = 201, /* Scalar function Op Remove */
OP_RENAME = 202, /* Scalar function Op Rename */
OP_FCOMP = 203}; /* Scalar function Op Compare */
+#endif // 0
enum TUSE {USE_NO = 0, /* Table is not yet linearized */
USE_LIN = 1, /* Table is linearized */
@@ -356,6 +370,7 @@ typedef class XOBJECT *PXOB;
typedef class COLBLK *PCOL;
typedef class TDB *PTDB;
typedef class TDBASE *PTDBASE;
+typedef class TDBEXT *PTDBEXT;
typedef class TDBDOS *PTDBDOS;
typedef class TDBFIX *PTDBFIX;
typedef class TDBFMT *PTDBFMT;
@@ -374,6 +389,7 @@ typedef class KXYCOL *PXCOL;
typedef class CATALOG *PCATLG;
typedef class RELDEF *PRELDEF;
typedef class TABDEF *PTABDEF;
+typedef class EXTDEF *PEXTBDEF;
typedef class DOSDEF *PDOSDEF;
typedef class CSVDEF *PCSVDEF;
typedef class VCTDEF *PVCTDEF;
@@ -619,4 +635,4 @@ int global_open(GLOBAL *g, int msgid, const char *filename, int flags, int mode)
DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR name, LPCSTR dir);
char *MakeEscape(PGLOBAL g, char* str, char q);
-DllExport bool PushWarning(PGLOBAL, PTDBASE, int level = 1);
+DllExport bool PushWarning(PGLOBAL, PTDB, int level = 1);
diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp
index 83975c6d8fa..1910cdcdec8 100644
--- a/storage/connect/plgdbutl.cpp
+++ b/storage/connect/plgdbutl.cpp
@@ -1,11 +1,11 @@
/********** PlgDBUtl Fpe C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: PLGDBUTL */
/* ------------- */
-/* Version 3.9 */
+/* Version 4.0 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -939,7 +939,11 @@ int PlugCloseFile(PGLOBAL g __attribute__((unused)), PFBLOCK fp, bool all)
#endif // LIBXML2_SUPPORT
#ifdef ZIP_SUPPORT
case TYPE_FB_ZIP:
- ((ZIPUTIL*)fp->File)->close();
+ if (fp->Mode == MODE_INSERT)
+ ((ZIPUTIL*)fp->File)->close();
+ else
+ ((UNZIPUTL*)fp->File)->close();
+
fp->Memory = NULL;
fp->Mode = MODE_ANY;
fp->Count = 0;
@@ -1119,7 +1123,7 @@ char *GetAmName(PGLOBAL g, AMT am, void *memp)
return amn;
} // end of GetAmName
-#if defined(__WIN__) && !defined(NOCATCH)
+#if defined(SE_CATCH)
/***********************************************************************/
/* GetExceptionDesc: return the description of an exception code. */
/***********************************************************************/
@@ -1207,7 +1211,7 @@ char *GetExceptionDesc(PGLOBAL g, unsigned int e)
return p;
} // end of GetExceptionDesc
-#endif // __WIN__ && !NOCATCH
+#endif // SE_CATCH
/***********************************************************************/
/* PlgDBalloc: allocates or suballocates memory conditionally. */
diff --git a/storage/connect/plgxml.cpp b/storage/connect/plgxml.cpp
index 71b72621b06..eb31e24235b 100644
--- a/storage/connect/plgxml.cpp
+++ b/storage/connect/plgxml.cpp
@@ -1,6 +1,6 @@
/******************************************************************/
/* Implementation of XML document processing using PdbXML. */
-/* Author: Olivier Bertrand 2007-2012 */
+/* Author: Olivier Bertrand 2007-2017 */
/******************************************************************/
#include "my_global.h"
#include "global.h"
@@ -49,7 +49,7 @@ bool XMLDOCUMENT::InitZip(PGLOBAL g, char *entry)
{
#if defined(ZIP_SUPPORT)
bool mul = (entry) ? strchr(entry, '*') || strchr(entry, '?') : false;
- zip = new(g) ZIPUTIL(entry, mul);
+ zip = new(g) UNZIPUTL(entry, mul);
return zip == NULL;
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
diff --git a/storage/connect/plgxml.h b/storage/connect/plgxml.h
index db7dfa6bda5..6870764c503 100644
--- a/storage/connect/plgxml.h
+++ b/storage/connect/plgxml.h
@@ -101,7 +101,7 @@ class XMLDOCUMENT : public BLOCK {
// Members
#if defined(ZIP_SUPPORT)
- ZIPUTIL *zip; /* Used for zipped file */
+ UNZIPUTL *zip; /* Used for zipped file */
#else // !ZIP_SUPPORT
bool zip; /* Always false */
#endif // !ZIP_SUPPORT
diff --git a/storage/connect/plugutil.c b/storage/connect/plugutil.c
index 2551b603349..bfac8a5fd99 100644
--- a/storage/connect/plugutil.c
+++ b/storage/connect/plugutil.c
@@ -244,6 +244,9 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath)
char *drive = NULL, *defdrv = NULL;
#endif
+ if (trace > 1)
+ htrc("prefix=%s fn=%s path=%s\n", prefix, FileName, defpath);
+
if (!strncmp(FileName, "//", 2) || !strncmp(FileName, "\\\\", 2)) {
strcpy(pBuff, FileName); // Remote file
return pBuff;
diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp
index 30e4d49d249..5bb7848ab1c 100644
--- a/storage/connect/reldef.cpp
+++ b/storage/connect/reldef.cpp
@@ -621,8 +621,8 @@ bool OEMDEF::DefineAM(PGLOBAL g, LPCSTR, int)
/***********************************************************************/
PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode)
{
- RECFM rfm;
- PTDBASE tdbp = NULL;
+ RECFM rfm;
+ PTDB tdbp = NULL;
// If define block not here yet, get it now
if (!Pxdef && !(Pxdef = GetXdef(g)))
@@ -632,7 +632,7 @@ PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode)
/* Allocate a TDB of the proper type. */
/* Column blocks will be allocated only when needed. */
/*********************************************************************/
- if (!(tdbp = (PTDBASE)Pxdef->GetTable(g, mode)))
+ if (!(tdbp = Pxdef->GetTable(g, mode)))
return NULL;
else
rfm = tdbp->GetFtype();
diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h
index bc1bd2ddd74..52a131dbf3d 100644
--- a/storage/connect/reldef.h
+++ b/storage/connect/reldef.h
@@ -64,15 +64,16 @@ class DllExport RELDEF : public BLOCK { // Relation definition block
}; // end of RELDEF
/***********************************************************************/
-/* These classes correspond to the data base description contained in */
-/* a .XDB file the A.M. DOS, FIX, CSV, MAP, BIN, VCT, PLG, ODBC, DOM. */
+/* This class corresponds to the data base description for tables */
+/* of type DOS, FIX, CSV, DBF, BIN, VCT, JSON, XML... */
/***********************************************************************/
class DllExport TABDEF : public RELDEF { /* Logical table descriptor */
friend class CATALOG;
friend class PLUGCAT;
friend class MYCAT;
- friend class TDBASE;
- public:
+ friend class TDB;
+ friend class TDBEXT;
+public:
// Constructor
TABDEF(void); // Constructor
@@ -112,11 +113,11 @@ class DllExport TABDEF : public RELDEF { /* Logical table descriptor */
int Sort; /* Table already sorted ??? */
int Multiple; /* 0: No 1: DIR 2: Section 3: filelist */
int Degree; /* Number of columns in the table */
- int Pseudo; /* Bit: 1 ROWID Ok, 2 FILEID Ok */
+ int Pseudo; /* Bit: 1 ROWID Ok, 2 FILEID Ok */
bool Read_Only; /* true for read only tables */
const CHARSET_INFO *m_data_charset;
const char *csname; /* Table charset name */
- }; // end of TABDEF
+}; // end of TABDEF
/***********************************************************************/
/* Externally defined OEM tables. */
@@ -190,11 +191,12 @@ class DllExport COLCRT : public BLOCK { /* Column description block
/***********************************************************************/
/* Column definition block. */
/***********************************************************************/
-class DllExport COLDEF : public COLCRT { /* Column description block */
+class DllExport COLDEF : public COLCRT { /* Column description block */
friend class TABDEF;
friend class COLBLK;
friend class DBFFAM;
- friend class TDBASE;
+ friend class TDB;
+ friend class TDBASE;
friend class TDBDOS;
public:
COLDEF(void); // Constructor
diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp
index 16cc6c33b44..d2bb3d7a4af 100644
--- a/storage/connect/tabdos.cpp
+++ b/storage/connect/tabdos.cpp
@@ -102,6 +102,7 @@ DOSDEF::DOSDEF(void)
Mapped = false;
Zipped = false;
Mulentries = false;
+ Append = false;
Padded = false;
Huge = false;
Accept = false;
@@ -132,10 +133,13 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
: (am && (*am == 'B' || *am == 'b')) ? "B"
: (am && !stricmp(am, "DBF")) ? "D" : "V";
- if ((Zipped = GetBoolCatInfo("Zipped", false)))
- Mulentries = ((Entry = GetStringCatInfo(g, "Entry", NULL)))
- ? strchr(Entry, '*') || strchr(Entry, '?')
- : GetBoolCatInfo("Mulentries", false);
+ if ((Zipped = GetBoolCatInfo("Zipped", false))) {
+ Entry = GetStringCatInfo(g, "Entry", NULL);
+ Mulentries = (Entry && *Entry) ? strchr(Entry, '*') || strchr(Entry, '?')
+ : false;
+ Mulentries = GetBoolCatInfo("Mulentries", Mulentries);
+ Append = GetBoolCatInfo("Append", false);
+ }
Desc = Fn = GetStringCatInfo(g, "Filename", NULL);
Ofn = GetStringCatInfo(g, "Optname", Fn);
@@ -347,10 +351,26 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
if (Zipped) {
#if defined(ZIP_SUPPORT)
if (Recfm == RECFM_VAR) {
- txfp = new(g)ZIPFAM(this);
- tdbp = new(g)TDBDOS(this, txfp);
+ if (mode == MODE_READ || mode == MODE_ANY) {
+ txfp = new(g) UNZFAM(this);
+ } else if (mode == MODE_INSERT) {
+ txfp = new(g) ZIPFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's mode
+
+ tdbp = new(g) TDBDOS(this, txfp);
} else {
- txfp = new(g)ZPXFAM(this);
+ if (mode == MODE_READ || mode == MODE_ANY) {
+ txfp = new(g) UZXFAM(this);
+ } else if (mode == MODE_INSERT) {
+ txfp = new(g) ZPXFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's mode
+
tdbp = new(g)TDBFIX(this, txfp);
} // endif Recfm
@@ -376,7 +396,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
txfp = new(g) MPXFAM(this);
else if (Compressed) {
#if defined(GZ_SUPPORT)
- txfp = new(g) ZIXFAM(this);
+ txfp = new(g) GZXFAM(this);
#else // !GZ_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ");
return NULL;
@@ -484,7 +504,7 @@ TDBDOS::TDBDOS(PGLOBAL g, PTDBDOS tdbp) : TDBASE(tdbp)
} // end of TDBDOS copy constructor
// Method
-PTDB TDBDOS::CopyOne(PTABS t)
+PTDB TDBDOS::Clone(PTABS t)
{
PTDB tp;
PDOSCOL cp1, cp2;
@@ -498,7 +518,7 @@ PTDB TDBDOS::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate DOS column description block. */
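
Aside from the diff: the DOSDEF::GetTable hunk above pairs each open mode with a zip file access family. The sketch below restates that mapping as a standalone C++ function; the family class names come from the new filamzip.h listed in the diffstat, while ZipFamilyFor and the MODE enumeration values used here are only illustrative.

#include <cstdio>

enum MODE { MODE_ANY, MODE_READ, MODE_INSERT, MODE_UPDATE, MODE_DELETE };

// Returns the family class name DOSDEF::GetTable would instantiate for a
// zipped table, or nullptr when the mode is not supported on zip files.
static const char *ZipFamilyFor(MODE mode, bool variable_recfm)
{
  if (mode == MODE_READ || mode == MODE_ANY)
    return variable_recfm ? "UNZFAM" : "UZXFAM";    // read through the unzip families
  if (mode == MODE_INSERT)
    return variable_recfm ? "ZIPFAM" : "ZPXFAM";    // insert through the zip families
  return nullptr;                                   // UPDATE/DELETE are rejected
}

int main()
{
  std::printf("%s %s\n", ZipFamilyFor(MODE_READ, true),     // UNZFAM
                         ZipFamilyFor(MODE_INSERT, false)); // ZPXFAM
  return 0;
}

Variable-record tables (TDBDOS) and fixed-record tables (TDBFIX) only differ in which pair of families they use; the UPDATE/DELETE rejection is what the new error message in the hunk reports.
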
diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h
index 4c8eb438a26..922d52ee399 100644
--- a/storage/connect/tabdos.h
+++ b/storage/connect/tabdos.h
@@ -28,7 +28,7 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
friend class TDBFIX;
friend class TXTFAM;
friend class DBFBASE;
- friend class ZIPUTIL;
+ friend class UNZIPUTL;
public:
// Constructor
DOSDEF(void);
@@ -43,7 +43,8 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
PSZ GetOfn(void) {return Ofn;}
PSZ GetEntry(void) {return Entry;}
bool GetMul(void) {return Mulentries;}
- void SetBlock(int block) {Block = block;}
+ bool GetAppend(void) {return Append;}
+ void SetBlock(int block) { Block = block; }
int GetBlock(void) {return Block;}
int GetLast(void) {return Last;}
void SetLast(int last) {Last = last;}
@@ -81,6 +82,7 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
bool Mapped; /* 0: disk file, 1: memory mapped file */
bool Zipped; /* true for zipped table file */
bool Mulentries; /* true for multiple entries */
+ bool Append; /* Used when creating zipped table */
bool Padded; /* true for padded table file */
bool Huge; /* true for files larger than 2GB */
bool Accept; /* true if wrong lines are accepted */
@@ -140,7 +142,7 @@ class DllExport TDBDOS : public TDBASE {
{return (PTDB)new(g) TDBDOS(g, this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual void ResetDB(void) {Txfp->Reset();}
virtual bool IsUsingTemp(PGLOBAL g);
virtual bool IsIndexed(void) {return Indxd;}
diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp
new file mode 100644
index 00000000000..e3518126a49
--- /dev/null
+++ b/storage/connect/tabext.cpp
@@ -0,0 +1,640 @@
+/************* Tabext C++ Functions Source Code File (.CPP) ************/
+/* Name: TABEXT.CPP Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* */
+/* This file contains the CONDFIL, EXTDEF, TDBEXT and EXTCOL classes. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant MariaDB header file. */
+/***********************************************************************/
+#define MYSQL_SERVER 1
+#include "my_global.h"
+#include "sql_class.h"
+#include "sql_servers.h"
+#include "sql_string.h"
+#if !defined(__WIN__)
+#include "osutil.h"
+#endif
+
+/***********************************************************************/
+/* Include required application header files */
+/* global.h is header containing all global Plug declarations. */
+/* plgdbsem.h is header containing the DB applic. declarations. */
+/* xobject.h is header containing XOBJECT derived classes declares. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "xtable.h"
+#include "tabext.h"
+#include "ha_connect.h"
+
+/* -------------------------- Class CONDFIL -------------------------- */
+
+/***********************************************************************/
+/* CONDFIL Constructor. */
+/***********************************************************************/
+CONDFIL::CONDFIL(const Item *cond, uint idx, AMT type)
+{
+ Cond = cond;
+ Idx = idx;
+ Type = type;
+ Op = OP_XX;
+ Cmds = NULL;
+ Alist = NULL;
+ All = true;
+ Bd = false;
+ Hv = false;
+ Body = NULL;
+ Having = NULL;
+} // end of CONDFIL constructor
+
+/***********************************************************************/
+/* Make and allocate the alias list. */
+/***********************************************************************/
+int CONDFIL::Init(PGLOBAL g, PHC hc)
+{
+ PTOS options = hc->GetTableOptionStruct();
+ char *p, *cn, *cal, *alt = NULL;
+ int rc = RC_OK;
+ bool h;
+
+ if (options)
+ alt = GetListOption(g, "Alias", options->oplist, NULL);
+
+ while (alt) {
+ if (!(p = strchr(alt, '='))) {
+ strcpy(g->Message, "Invalid alias list");
+ rc = RC_FX;
+ break;
+ } // endif !p
+
+ cal = alt; // Alias
+ *p++ = 0;
+
+ if ((h = *p == '*')) {
+ rc = RC_INFO;
+ p++;
+ } // endif h
+
+ cn = p; // Remote column name
+
+ if ((alt = strchr(p, ';')))
+ *alt++ = 0;
+
+ if (*cn == 0)
+ cn = alt;
+
+ Alist = new(g) ALIAS(Alist, cn, cal, h);
+ } // endwhile alt
+
+ return rc;
+} // end of Init
+
+/***********************************************************************/
+/* Check a column name against the alias list; return its remote name. */
+/***********************************************************************/
+const char *CONDFIL::Chk(const char *fln, bool *h)
+{
+ for (PAL pal = Alist; pal; pal = pal->Next)
+ if (!stricmp(fln, pal->Alias)) {
+ *h = pal->Having;
+ return pal->Name;
+ } // endif fln
+
+ *h = false;
+ return fln;
+} // end of Chk
+
+/* --------------------------- Class EXTDEF -------------------------- */
+
+/***********************************************************************/
+/* EXTDEF Constructor. */
+/***********************************************************************/
+EXTDEF::EXTDEF(void)
+{
+ Tabname = Tabschema = Username = Password = Tabcat = Tabtyp = NULL;
+ Colpat = Srcdef = Qchar = Qrystr = Sep = Phpos = NULL;
+ Options = Cto = Qto = Quoted = Maxerr = Maxres = Memory = 0;
+ Scrollable = Xsrc = false;
+} // end of EXTDEF constructor
+
+/***********************************************************************/
+/* DefineAM: define specific AM block values from XDB file. */
+/***********************************************************************/
+bool EXTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
+{
+ Desc = NULL;
+ Tabname = GetStringCatInfo(g, "Name",
+ (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
+ Tabname = GetStringCatInfo(g, "Tabname", Tabname);
+ Tabschema = GetStringCatInfo(g, "Dbname", NULL);
+ Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
+ Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
+ Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
+ Username = GetStringCatInfo(g, "User", NULL);
+ Password = GetStringCatInfo(g, "Password", NULL);
+
+ if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
+ Read_Only = true;
+
+ Qrystr = GetStringCatInfo(g, "Query_String", "?");
+ Sep = GetStringCatInfo(g, "Separator", NULL);
+//Alias = GetStringCatInfo(g, "Alias", NULL);
+ Phpos = GetStringCatInfo(g, "Phpos", NULL);
+ Xsrc = GetBoolCatInfo("Execsrc", FALSE);
+ Maxerr = GetIntCatInfo("Maxerr", 0);
+ Maxres = GetIntCatInfo("Maxres", 0);
+ Quoted = GetIntCatInfo("Quoted", 0);
+ Options = 0;
+ Cto = 0;
+ Qto = 0;
+
+ if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt)
+ Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch
+
+ if (Catfunc == FNC_COL)
+ Colpat = GetStringCatInfo(g, "Colpat", NULL);
+
+ if (Catfunc == FNC_TABLE)
+ Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
+
+ // Memory was Boolean, it is now integer
+ if (!(Memory = GetIntCatInfo("Memory", 0)))
+ Memory = GetBoolCatInfo("Memory", false) ? 1 : 0;
+
+ Pseudo = 2; // FILEID is Ok but not ROWID
+ return false;
+} // end of DefineAM
+
+/* ---------------------------TDBEXT class --------------------------- */
+
+/***********************************************************************/
+/* Implementation of the TDBEXT class. */
+/***********************************************************************/
+TDBEXT::TDBEXT(EXTDEF *tdp) : TDB(tdp)
+{
+ Qrp = NULL;
+
+ if (tdp) {
+ TableName = tdp->Tabname;
+ Schema = tdp->Tabschema;
+ User = tdp->Username;
+ Pwd = tdp->Password;
+ Catalog = tdp->Tabcat;
+ Srcdef = tdp->Srcdef;
+ Qrystr = tdp->Qrystr;
+ Sep = tdp->GetSep();
+ Options = tdp->Options;
+ Cto = tdp->Cto;
+ Qto = tdp->Qto;
+ Quoted = MY_MAX(0, tdp->GetQuoted());
+ Rows = tdp->GetElemt();
+ Memory = tdp->Memory;
+ Scrollable = tdp->Scrollable;
+ } else {
+ TableName = NULL;
+ Schema = NULL;
+ User = NULL;
+ Pwd = NULL;
+ Catalog = NULL;
+ Srcdef = NULL;
+ Qrystr = NULL;
+ Sep = 0;
+ Options = 0;
+ Cto = 0;
+ Qto = 0;
+ Quoted = 0;
+ Rows = 0;
+ Memory = 0;
+ Scrollable = false;
+ } // endif tdp
+
+ Quote = NULL;
+ Query = NULL;
+ Count = NULL;
+ //Where = NULL;
+ MulConn = NULL;
+ DBQ = NULL;
+ Qrp = NULL;
+ Fpos = 0;
+ Curpos = 0;
+ AftRows = 0;
+ CurNum = 0;
+ Rbuf = 0;
+ BufSize = 0;
+ Nparm = 0;
+ Ncol = 0;
+ Placed = false;
+} // end of TDBEXT constructor
+
+TDBEXT::TDBEXT(PTDBEXT tdbp) : TDB(tdbp)
+{
+ Qrp = tdbp->Qrp;
+ TableName = tdbp->TableName;
+ Schema = tdbp->Schema;
+ User = tdbp->User;
+ Pwd = tdbp->Pwd;
+ Catalog = tdbp->Catalog;
+ Srcdef = tdbp->Srcdef;
+ Qrystr = tdbp->Qrystr;
+ Sep = tdbp->Sep;
+ Options = tdbp->Options;
+ Cto = tdbp->Cto;
+ Qto = tdbp->Qto;
+ Quoted = tdbp->Quoted;
+ Rows = tdbp->Rows;
+ Memory = tdbp->Memory;
+ Scrollable = tdbp->Scrollable;
+ Quote = tdbp->Quote;
+ Query = tdbp->Query;
+ Count = tdbp->Count;
+ //Where = tdbp->Where;
+ MulConn = tdbp->MulConn;
+ DBQ = tdbp->DBQ;
+ Fpos = 0;
+ Curpos = 0;
+ AftRows = 0;
+ CurNum = 0;
+ Rbuf = 0;
+ BufSize = tdbp->BufSize;
+ Nparm = tdbp->Nparm;
+ Ncol = tdbp->Ncol;
+ Placed = false;
+} // end of TDBEXT copy constructor
+
+/******************************************************************/
+/* Convert a UTF-8 string to latin characters. */
+/******************************************************************/
+int TDBEXT::Decode(char *txt, char *buf, size_t n)
+{
+ uint dummy_errors;
+ uint32 len = copy_and_convert(buf, n, &my_charset_latin1,
+ txt, strlen(txt),
+ &my_charset_utf8_general_ci,
+ &dummy_errors);
+ buf[len] = '\0';
+ return 0;
+} // end of Decode
+
+/***********************************************************************/
+/* MakeSQL: make the SQL statement to use with the remote connection. */
+/* TODO: when implementing remote filtering, columns only used in the */
+/* local filter should be removed from the column list. */
+/***********************************************************************/
+bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
+{
+ char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3];
+ int len;
+ bool oom = false, first = true;
+ PTABLE tablep = To_Table;
+ PCOL colp;
+
+ if (Srcdef) {
+ if ((catp = strstr(Srcdef, "%s"))) {
+ char *fil1, *fil2;
+ PSZ ph = ((EXTDEF*)To_Def)->Phpos;
+
+ if (!ph)
+ ph = (strstr(catp + 2, "%s")) ? const_cast<char*>("WH") :
+ const_cast<char*>("W");
+
+ if (stricmp(ph, "H")) {
+ fil1 = (To_CondFil && *To_CondFil->Body)
+ ? To_CondFil->Body : PlugDup(g, "1=1");
+ } // endif ph
+
+ if (stricmp(ph, "W")) {
+ fil2 = (To_CondFil && To_CondFil->Having && *To_CondFil->Having)
+ ? To_CondFil->Having : PlugDup(g, "1=1");
+ } // endif ph
+
+ if (!stricmp(ph, "W")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil1));
+ } else if (!stricmp(ph, "WH")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil1, fil2));
+ } else if (!stricmp(ph, "H")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil2));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2));
+ } else if (!stricmp(ph, "HW")) {
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2, fil1));
+ } else {
+ strcpy(g->Message, "MakeSQL: Wrong place holders specification");
+ return true;
+ } // endif's ph
+
+ } else
+ Query = new(g)STRING(g, 0, Srcdef);
+
+ return false;
+ } // endif Srcdef
+
+ // Allocate the string used to contain the Query
+ Query = new(g)STRING(g, 1023, "SELECT ");
+
+ if (!cnt) {
+ if (Columns) {
+ // Normal SQL statement to retrieve results
+ for (colp = Columns; colp; colp = colp->GetNext())
+ if (!colp->IsSpecial()) {
+ if (!first)
+ oom |= Query->Append(", ");
+ else
+ first = false;
+
+ // Column name can be encoded in UTF-8
+ Decode(colp->GetName(), buf, sizeof(buf));
+
+ if (Quote) {
+ // Put column name between identifier quotes in case it contains blanks
+ oom |= Query->Append(Quote);
+ oom |= Query->Append(buf);
+ oom |= Query->Append(Quote);
+ } else
+ oom |= Query->Append(buf);
+
+ ((PEXTCOL)colp)->SetRank(++Ncol);
+ } // endif colp
+
+ } else
+ // !Columns can occur for queries such as SELECT count(*) FROM ...
+ // for which we will count the rows returned by SELECT * FROM ...
+ oom |= Query->Append('*');
+
+ } else
+ // SQL statement used to retrieve the size of the result
+ oom |= Query->Append("count(*)");
+
+ oom |= Query->Append(" FROM ");
+
+ if (Catalog && *Catalog)
+ catp = Catalog;
+
+ //if (tablep->GetSchema())
+ // schmp = (char*)tablep->GetSchema();
+ //else
+ if (Schema && *Schema)
+ schmp = Schema;
+
+ if (catp) {
+ oom |= Query->Append(catp);
+
+ if (schmp) {
+ oom |= Query->Append('.');
+ oom |= Query->Append(schmp);
+ } // endif schmp
+
+ oom |= Query->Append('.');
+ } else if (schmp) {
+ oom |= Query->Append(schmp);
+ oom |= Query->Append('.');
+ } // endif schmp
+
+ // Table name can be encoded in UTF-8
+ Decode(TableName, buf, sizeof(buf));
+
+ if (Quote) {
+ // Put table name between identifier quotes in case it contains blanks
+ oom |= Query->Append(Quote);
+ oom |= Query->Append(buf);
+ oom |= Query->Append(Quote);
+ } else
+ oom |= Query->Append(buf);
+
+ len = Query->GetLength();
+
+ if (To_CondFil) {
+ if (Mode == MODE_READ) {
+ oom |= Query->Append(" WHERE ");
+ oom |= Query->Append(To_CondFil->Body);
+ len = Query->GetLength() + 1;
+ } else
+ len += (strlen(To_CondFil->Body) + 256);
+
+ } else
+ len += ((Mode == MODE_READX) ? 256 : 1);
+
+ if (oom || Query->Resize(len)) {
+ strcpy(g->Message, "MakeSQL: Out of memory");
+ return true;
+ } // endif oom
+
+ if (trace)
+ htrc("Query=%s\n", Query->GetStr());
+
+ return false;
+} // end of MakeSQL
+
+/***********************************************************************/
+/* MakeCommand: make the Update or Delete statement to send to the */
+/* MySQL server. Limited to remote values and filtering. */
+/***********************************************************************/
+bool TDBEXT::MakeCommand(PGLOBAL g)
+{
+ char *p, *stmt, name[68], *body = NULL;
+ char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1);
+ bool qtd = Quoted > 0;
+ int i = 0, k = 0;
+
+ // Make a lower case copy of the original query and change
+ // back ticks to the data source identifier quoting character
+ do {
+ qrystr[i] = (Qrystr[i] == '`') ? *Quote : tolower(Qrystr[i]);
+ } while (Qrystr[i++]);
+
+ if (To_CondFil && (p = strstr(qrystr, " where "))) {
+ p[7] = 0; // Remove where clause
+ Qrystr[(p - qrystr) + 7] = 0;
+ body = To_CondFil->Body;
+ stmt = (char*)PlugSubAlloc(g, NULL, strlen(qrystr)
+ + strlen(body) + 64);
+ } else
+ stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64);
+
+ // Check whether the table name is equal to a keyword
+ // If so, it must be quoted in the original query
+ strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
+
+ if (strstr(" update delete low_priority ignore quick from ", name)) {
+ strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote));
+ k += 2;
+ } else
+ strlwr(strcpy(name, Name)); // Not a keyword
+
+ if ((p = strstr(qrystr, name))) {
+ for (i = 0; i < p - qrystr; i++)
+ stmt[i] = (Qrystr[i] == '`') ? *Quote : Qrystr[i];
+
+ stmt[i] = 0;
+ k += i + (int)strlen(Name);
+
+ if (qtd && *(p - 1) == ' ')
+ strcat(strcat(strcat(stmt, Quote), TableName), Quote);
+ else
+ strcat(stmt, TableName);
+
+ i = (int)strlen(stmt);
+
+ do {
+ stmt[i++] = (Qrystr[k] == '`') ? *Quote : Qrystr[k];
+ } while (Qrystr[k++]);
+
+ if (body)
+ strcat(stmt, body);
+
+ } else {
+ sprintf(g->Message, "Cannot use this %s command",
+ (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
+ return true;
+ } // endif p
+
+ if (trace)
+ htrc("Command=%s\n", stmt);
+
+ Query = new(g)STRING(g, 0, stmt);
+ return (!Query->GetSize());
+} // end of MakeCommand
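As a rough standalone sketch of the quote-translation step above (the TranslateQuotes helper is hypothetical, not part of CONNECT), every back tick coming from the MySQL-side statement is swapped for the remote data source's identifier quoting character:

#include <cstdio>
#include <string>

// Stand-in for the character translation performed by TDBEXT::MakeCommand:
// back ticks written by MySQL become the remote identifier quote character
// (for example '"' on most ODBC data sources).
static std::string TranslateQuotes(const std::string &qrystr, char quote)
{
  std::string out(qrystr);

  for (char &c : out)
    if (c == '`')
      c = quote;                      // Replace MySQL quoting by remote quoting

  return out;
}

int main()
{
  // Prints: UPDATE "t1" SET a = 1 WHERE b = 2
  printf("%s\n", TranslateQuotes("UPDATE `t1` SET a = 1 WHERE b = 2", '"').c_str());
  return 0;
}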
+
+/***********************************************************************/
+/* GetRecpos: return the position of last read record. */
+/***********************************************************************/
+int TDBEXT::GetRecpos(void)
+{
+ return Fpos;
+} // end of GetRecpos
+
+/***********************************************************************/
+/* EXT GetMaxSize: returns table size estimate in number of lines. */
+/***********************************************************************/
+int TDBEXT::GetMaxSize(PGLOBAL g)
+{
+ if (MaxSize < 0) {
+ if (Mode == MODE_DELETE)
+ // Return 0 in mode DELETE in case of delete all.
+ MaxSize = 0;
+ else if (!Cardinality(NULL))
+ MaxSize = 10; // To make MySQL happy
+ else if ((MaxSize = Cardinality(g)) < 0)
+ MaxSize = 12; // So we can see an error occurred
+
+ } // endif MaxSize
+
+ return MaxSize;
+} // end of GetMaxSize
+
+/***********************************************************************/
+/* Return max size value. */
+/***********************************************************************/
+int TDBEXT::GetProgMax(PGLOBAL g)
+{
+ return GetMaxSize(g);
+} // end of GetProgMax
+
+/* ---------------------------EXTCOL class --------------------------- */
+
+/***********************************************************************/
+/* EXTCOL public constructor. */
+/***********************************************************************/
+EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
+ : COLBLK(cdp, tdbp, i)
+{
+ if (cprec) {
+ Next = cprec->GetNext();
+ cprec->SetNext(this);
+ } else {
+ Next = tdbp->GetColumns();
+ tdbp->SetColumns(this);
+ } // endif cprec
+
+ if (trace)
+ htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this);
+
+ // Set additional remote access method information for column.
+ Crp = NULL;
+ Long = Precision;
+ To_Val = NULL;
+ Bufp = NULL;
+ Blkp = NULL;
+ Rank = 0; // Not known yet
+} // end of EXTCOL constructor
+
+/***********************************************************************/
+/* EXTCOL private constructor. */
+/***********************************************************************/
+EXTCOL::EXTCOL(void) : COLBLK()
+{
+ Crp = NULL;
+ Buf_Type = TYPE_INT; // This is a count(*) column
+
+ // Set additional Dos access method information for column.
+ Long = sizeof(int);
+ To_Val = NULL;
+ Bufp = NULL;
+ Blkp = NULL;
+ Rank = 1;
+} // end of EXTCOL constructor
+
+/***********************************************************************/
+/* EXTCOL constructor used for copying columns. */
+/* tdbp is the pointer to the new table descriptor. */
+/***********************************************************************/
+EXTCOL::EXTCOL(PEXTCOL col1, PTDB tdbp) : COLBLK(col1, tdbp)
+{
+ Crp = col1->Crp;
+ Long = col1->Long;
+ To_Val = col1->To_Val;
+ Bufp = col1->Bufp;
+ Blkp = col1->Blkp;
+ Rank = col1->Rank;
+} // end of EXTCOL copy constructor
+
+/***********************************************************************/
+/* SetBuffer: prepare a column block for write operation. */
+/***********************************************************************/
+bool EXTCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
+{
+ if (!(To_Val = value)) {
+ sprintf(g->Message, MSG(VALUE_ERROR), Name);
+ return true;
+ } else if (Buf_Type == value->GetType()) {
+ // Values are of the (good) column type
+ if (Buf_Type == TYPE_DATE) {
+ // If any of the date values is formatted
+ // output format must be set for the receiving table
+ if (GetDomain() || ((DTVAL *)value)->IsFormatted())
+ goto newval; // This will make a new value;
+
+ } else if (Buf_Type == TYPE_DOUBLE)
+ // Float values must be written with the correct (column) precision
+ // Note: maybe this should be forced by ShowValue instead of this ?
+ value->SetPrec(GetScale());
+
+ Value = value; // Directly access the external value
+ } else {
+ // Values are not of the (good) column type
+ if (check) {
+ sprintf(g->Message, MSG(TYPE_VALUE_ERR), Name,
+ GetTypeName(Buf_Type), GetTypeName(value->GetType()));
+ return true;
+ } // endif check
+
+ newval:
+ if (InitValue(g)) // Allocate the matching value block
+ return true;
+
+ } // endif's Value, Buf_Type
+
+ // Because Colblk's have been made from a copy of the original TDB in
+ // case of Update, we must reset them to point to the original one.
+ if (To_Tdb->GetOrig())
+ To_Tdb = (PTDB)To_Tdb->GetOrig();
+
+ // Set the Column
+ Status = (ok) ? BUF_EMPTY : BUF_NO;
+ return false;
+} // end of SetBuffer
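The branch structure of SetBuffer can be summarized by this simplified, self-contained sketch; the enum and the BufferPolicy helper are placeholders chosen for this note, not CONNECT types.

#include <cstdio>

// Illustrative only: mirrors the decision made by EXTCOL::SetBuffer.
enum ValType { T_INT, T_DOUBLE, T_DATE, T_STRING };

static const char *BufferPolicy(ValType colType, ValType valType,
                                bool formattedDate, bool strictCheck)
{
  if (colType == valType) {
    if (colType == T_DATE && formattedDate)
      return "allocate new value (date needs reformatting)";
    return "reuse caller's value directly";
  }
  if (strictCheck)
    return "error: type mismatch";
  return "allocate new value (implicit conversion)";
}

int main()
{
  printf("%s\n", BufferPolicy(T_DOUBLE, T_DOUBLE, false, true)); // reuse
  printf("%s\n", BufferPolicy(T_INT, T_STRING, false, false));   // convert
  return 0;
}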
+
diff --git a/storage/connect/tabext.h b/storage/connect/tabext.h
new file mode 100644
index 00000000000..2ef20c89f2c
--- /dev/null
+++ b/storage/connect/tabext.h
@@ -0,0 +1,200 @@
+/*************** Tabext H Declares Source Code File (.H) ***************/
+/* Name: TABEXT.H Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* */
+/* This is the EXTDEF, TABEXT and EXTCOL classes definitions. */
+/***********************************************************************/
+
+#ifndef __TABEXT_H
+#define __TABEXT_H
+
+#include "reldef.h"
+
+typedef class ALIAS *PAL;
+
+class ALIAS : public BLOCK {
+ public:
+ ALIAS(PAL x, PSZ n, PSZ a, bool h)
+ {Next = x, Name = n, Alias = a, Having = h;}
+
+ PAL Next;
+ PSZ Name;
+ PSZ Alias;
+ bool Having;
+}; // end of class ALIAS
+
+// Condition filter structure
+class CONDFIL : public BLOCK {
+ public:
+ // Constructor
+ CONDFIL(const Item *cond, uint idx, AMT type);
+
+ // Functions
+ int Init(PGLOBAL g, PHC hc);
+ const char *Chk(const char *cln, bool *h);
+
+ // Members
+ const Item *Cond;
+ AMT Type;
+ uint Idx;
+ OPVAL Op;
+ PCMD Cmds;
+ PAL Alist;
+ bool All;
+ bool Bd;
+ bool Hv;
+ char *Body;
+ char *Having;
+}; // end of class CONDFIL
+
+/***********************************************************************/
+/* This class corresponds to the data base description for external */
+/* tables of type MYSQL, ODBC, JDBC... */
+/***********************************************************************/
+class DllExport EXTDEF : public TABDEF { /* EXT table */
+ friend class TDBEXT;
+public:
+ // Constructor
+ EXTDEF(void); // Constructor
+
+ // Implementation
+ virtual const char *GetType(void) { return "EXT"; }
+ inline PSZ GetTabname(void) { return Tabname; }
+ inline PSZ GetTabschema(void) { return Tabschema; }
+ inline PSZ GetUsername(void) { return Username; };
+ inline PSZ GetPassword(void) { return Password; };
+ inline PSZ GetTabcat(void) { return Tabcat; }
+ inline PSZ GetSrcdef(void) { return Srcdef; }
+ inline char GetSep(void) { return (Sep) ? *Sep : 0; }
+ inline int GetQuoted(void) { return Quoted; }
+ inline int GetOptions(void) { return Options; }
+
+ // Methods
+ virtual int Indexable(void) { return 2; }
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+
+protected:
+ // Members
+ PSZ Tabname; /* External table name */
+ PSZ Tabschema; /* External table schema */
+ PSZ Username; /* User connect name */
+ PSZ Password; /* Password connect info */
+ PSZ Tabcat; /* External table catalog */
+ PSZ Tabtyp; /* Catalog table type */
+ PSZ Colpat; /* Catalog column pattern */
+ PSZ Srcdef; /* The source table SQL definition */
+ PSZ Qchar; /* Identifier quoting character */
+ PSZ Qrystr; /* The original query */
+ PSZ Sep; /* Decimal separator */
+//PSZ Alias; /* Column alias list */
+ PSZ Phpos; /* Place holder positions */
+ int Options; /* Open connection options */
+ int Cto; /* Open connection timeout */
+ int Qto; /* Query (command) timeout */
+ int Quoted; /* Identifier quoting level */
+ int Maxerr; /* Maxerr for an Exec table */
+ int Maxres; /* Maxres for a catalog table */
+ int Memory; /* Put result set in memory */
+ bool Scrollable; /* Use scrollable cursor */
+ bool Xsrc; /* Execution type */
+}; // end of EXTDEF
+
+/***********************************************************************/
+/* This is the base class for all external tables. */
+/***********************************************************************/
+class DllExport TDBEXT : public TDB {
+public:
+ // Constructors
+ TDBEXT(EXTDEF *tdp);
+ TDBEXT(PTDBEXT tdbp);
+
+ // Implementation
+
+ // Properties
+ virtual bool IsRemote(void) { return true; }
+
+ // Methods
+ virtual PSZ GetServer(void) { return "Remote"; }
+ virtual int GetRecpos(void);
+
+ // Database routines
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual int GetProgMax(PGLOBAL g);
+
+protected:
+ // Internal functions
+ virtual bool MakeSQL(PGLOBAL g, bool cnt);
+ //virtual bool MakeInsert(PGLOBAL g);
+ virtual bool MakeCommand(PGLOBAL g);
+ int Decode(char *utf, char *buf, size_t n);
+
+ // Members
+ PQRYRES Qrp; // Points to storage result
+ PSTRG Query; // Constructed SQL query
+ char *TableName; // Points to ODBC table name
+ char *Schema; // Points to ODBC table Schema
+ char *User; // User connect info
+ char *Pwd; // Password connect info
+ char *Catalog; // Points to ODBC table Catalog
+ char *Srcdef; // The source table SQL definition
+ char *Count; // Points to count(*) SQL statement
+ //char *Where; // Points to local where clause
+ char *Quote; // The identifier quoting character
+ char *MulConn; // Used for multiple ODBC tables
+ char *DBQ; // The address part of Connect string
+ char *Qrystr; // The original query
+ char Sep; // The decimal separator
+ int Options; // Connect options
+ int Cto; // Connect timeout
+ int Qto; // Query timeout
+ int Quoted; // The identifier quoting level
+ int Fpos; // Position of last read record
+ int Curpos; // Cursor position of last fetch
+ int AftRows; // The number of affected rows
+ int Rows; // Rowset size
+ int CurNum; // Current buffer line number
+ int Rbuf; // Number of lines read in buffer
+ int BufSize; // Size of connect string buffer
+ int Nparm; // The number of statement parameters
+ int Memory; // 0: No 1: Alloc 2: Put 3: Get
+ int Ncol; // The column number (JDBC)
+ bool Scrollable; // Use scrollable cursor
+ bool Placed; // True for position reading
+}; // end of class TDBEXT
+
+/***********************************************************************/
+/* Virtual class EXTCOL: external column. */
+/***********************************************************************/
+class DllExport EXTCOL : public COLBLK {
+ friend class TDBEXT;
+public:
+ // Constructor
+ EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am);
+ EXTCOL(PEXTCOL colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+ inline int GetRank(void) { return Rank; }
+ inline void SetRank(int k) { Rank = k; }
+ //inline PVBLK GetBlkp(void) {return Blkp;}
+ inline void SetCrp(PCOLRES crp) { Crp = crp; }
+
+ // Methods
+ virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ virtual void ReadColumn(PGLOBAL) = 0;
+ virtual void WriteColumn(PGLOBAL) = 0;
+
+protected:
+ // Constructor for count(*) column
+ EXTCOL(void);
+
+ // Members
+ PCOLRES Crp; // To storage result
+ void *Bufp; // To extended buffer
+ PVBLK Blkp; // To Value Block
+ PVAL To_Val; // To value used for Insert
+ int Rank; // Rank (position) number in the query
+ //int Flag; // ???
+}; // end of class EXTCOL
+
+#endif // __TABEXT_H
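For readers new to this hierarchy, the following toy analogue (plain C++, not the CONNECT classes themselves) shows the intended division of labour: the EXTCOL-like base keeps the rank bookkeeping while a driver subclass, as JDBCCOL does later in this patch, supplies ReadColumn and WriteColumn.

#include <cstdio>

// A toy analogue of the EXTCOL hierarchy declared above.
class ExtColLike {
public:
  virtual ~ExtColLike() {}
  int  GetRank() const { return rank; }
  void SetRank(int k)  { rank = k; }
  virtual void ReadColumn()  = 0;   // Must be supplied by the driver
  virtual void WriteColumn() = 0;
protected:
  int rank = 0;                     // Position in the generated query
};

class DemoCol : public ExtColLike {
public:
  void ReadColumn() override  { printf("fetch column at rank %d\n", GetRank()); }
  void WriteColumn() override { printf("bind column at rank %d\n", GetRank()); }
};

int main()
{
  DemoCol c;
  c.SetRank(1);
  c.ReadColumn();   // fetch column at rank 1
  c.WriteColumn();  // bind column at rank 1
  return 0;
}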
diff --git a/storage/connect/tabfix.cpp b/storage/connect/tabfix.cpp
index d99f7800f26..bf123cd36c8 100644
--- a/storage/connect/tabfix.cpp
+++ b/storage/connect/tabfix.cpp
@@ -77,7 +77,7 @@ TDBFIX::TDBFIX(PGLOBAL g, PTDBFIX tdbp) : TDBDOS(g, tdbp)
} // end of TDBFIX copy constructor
// Method
-PTDB TDBFIX::CopyOne(PTABS t)
+PTDB TDBFIX::Clone(PTABS t)
{
PTDB tp;
PGLOBAL g = t->G;
@@ -105,7 +105,7 @@ PTDB TDBFIX::CopyOne(PTABS t)
} // endif Ftype
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Reset read/write position values. */
diff --git a/storage/connect/tabfix.h b/storage/connect/tabfix.h
index 49956ba0711..4b9f9689992 100644
--- a/storage/connect/tabfix.h
+++ b/storage/connect/tabfix.h
@@ -34,7 +34,7 @@ class DllExport TDBFIX : public TDBDOS {
{return (PTDB)new(g) TDBFIX(g, this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual void ResetDB(void);
virtual bool IsUsingTemp(PGLOBAL g);
virtual int RowNumber(PGLOBAL g, bool b = false);
diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp
index b24375443f6..0da67ef5e7f 100644
--- a/storage/connect/tabfmt.cpp
+++ b/storage/connect/tabfmt.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2001 - 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2001 - 2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -98,8 +98,9 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
int num_read = 0, num_max = 10000000; // Statistics
int len[MAXCOL], typ[MAXCOL], prc[MAXCOL];
PCSVDEF tdp;
- PTDBCSV tdbp;
- PQRYRES qrp;
+ PTDBCSV tcvp;
+ PTDBASE tdbp;
+ PQRYRES qrp;
PCOLRES crp;
if (info) {
@@ -108,10 +109,10 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
goto skipit;
} // endif info
- if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
- strcpy(g->Message, "Cannot find column definition for multiple table");
- return NULL;
- } // endif Multiple
+ //if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
+ // strcpy(g->Message, "Cannot find column definition for multiple table");
+ // return NULL;
+ //} // endif Multiple
// num_max = atoi(p+1); // Max num of record to test
imax = hmax = nerr = 0;
@@ -127,10 +128,20 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
/* Get the CSV table description block. */
/*********************************************************************/
tdp = new(g) CSVDEF;
+ tdp->Database = dp;
+
+ if ((tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false))) {
#if defined(ZIP_SUPPORT)
- tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL);
- tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false);
-#endif // ZIP_SUPPORT
+ tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL);
+ tdp->Mulentries = (tdp->Entry)
+ ? strchr(tdp->Entry, '*') || strchr(tdp->Entry, '?')
+ : GetBooleanTableOption(g, topt, "Mulentries", false);
+#else // !ZIP_SUPPORT
+ strcpy(g->Message, "ZIP not supported by this version");
+ return NULL;
+#endif // !ZIP_SUPPORT
+ } // endif Zipped
+
fn = tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL);
if (!tdp->Fn) {
@@ -141,6 +152,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
if (!(tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))
tdp->Lrecl = 4096;
+ tdp->Multiple = GetIntegerTableOption(g, topt, "Multiple", 0);
p = GetStringTableOption(g, topt, "Separator", ",");
tdp->Sep = (strlen(p) == 2 && p[0] == '\\' && p[1] == 't') ? '\t' : *p;
@@ -177,17 +189,18 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
htrc("File %s Sep=%c Qot=%c Header=%d maxerr=%d\n",
SVP(tdp->Fn), tdp->Sep, tdp->Qot, tdp->Header, tdp->Maxerr);
- if (tdp->Zipped) {
-#if defined(ZIP_SUPPORT)
- tdbp = new(g)TDBCSV(tdp, new(g)ZIPFAM(tdp));
-#else // !ZIP_SUPPORT
- sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
- return NULL;
-#endif // !ZIP_SUPPORT
- } else
- tdbp = new(g) TDBCSV(tdp, new(g) DOSFAM(tdp));
+ if (tdp->Zipped)
+ tcvp = new(g)TDBCSV(tdp, new(g)UNZFAM(tdp));
+ else
+ tcvp = new(g) TDBCSV(tdp, new(g) DOSFAM(tdp));
+
+ tcvp->SetMode(MODE_READ);
- tdbp->SetMode(MODE_READ);
+ if (tdp->Multiple) {
+ tdbp = new(g)TDBMUL(tcvp);
+ tdbp->SetMode(MODE_READ);
+ } else
+ tdbp = tcvp;
/*********************************************************************/
/* Open the CSV file. */
@@ -202,7 +215,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
phase = 0;
if ((rc = tdbp->ReadDB(g)) == RC_OK) {
- p = PlgDBDup(g, tdbp->To_Line);
+ p = PlgDBDup(g, tcvp->To_Line);
//skip leading blanks
for (; *p == ' '; p++) ;
@@ -245,6 +258,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
for (i = 0; i < hmax; i++)
length[0] = MY_MAX(length[0], strlen(colname[i]));
+ tcvp->Header = true; // In case of multiple table
} // endif hdr
for (num_read++; num_read <= num_max; num_read++) {
@@ -265,7 +279,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info)
/*******************************************************************/
i = n = phase = blank = digit = dec = 0;
- for (p = tdbp->To_Line; *p; p++)
+ for (p = tcvp->To_Line; *p; p++)
if (*p == sep) {
if (phase != 1) {
if (i == MAXCOL - 1) {
@@ -503,7 +517,14 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode)
/*******************************************************************/
if (Zipped) {
#if defined(ZIP_SUPPORT)
- txfp = new(g) ZIPFAM(this);
+ if (mode == MODE_READ || mode == MODE_ANY) {
+ txfp = new(g) UNZFAM(this);
+ } else if (mode == MODE_INSERT) {
+ txfp = new(g) ZIPFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's mode
#else // !ZIP_SUPPORT
strcpy(g->Message, "ZIP not supported");
return NULL;
@@ -640,7 +661,7 @@ TDBCSV::TDBCSV(PGLOBAL g, PTDBCSV tdbp) : TDBDOS(g, tdbp)
} // end of TDBCSV copy constructor
// Method
-PTDB TDBCSV::CopyOne(PTABS t)
+PTDB TDBCSV::Clone(PTABS t)
{
PTDB tp;
PCSVCOL cp1, cp2;
@@ -654,7 +675,7 @@ PTDB TDBCSV::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate CSV column description block. */
@@ -1148,7 +1169,7 @@ TDBFMT::TDBFMT(PGLOBAL g, PTDBFMT tdbp) : TDBCSV(g, tdbp)
} // end of TDBFMT copy constructor
// Method
-PTDB TDBFMT::CopyOne(PTABS t)
+PTDB TDBFMT::Clone(PTABS t)
{
PTDB tp;
PCSVCOL cp1, cp2;
@@ -1165,7 +1186,7 @@ PTDB TDBFMT::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate FMT column description block. */
diff --git a/storage/connect/tabfmt.h b/storage/connect/tabfmt.h
index 5ce8d399a64..e5655435be7 100644
--- a/storage/connect/tabfmt.h
+++ b/storage/connect/tabfmt.h
@@ -52,6 +52,7 @@ public:
/***********************************************************************/
class DllExport TDBCSV : public TDBDOS {
friend class CSVCOL;
+ friend class MAPFAM;
friend PQRYRES CSVColumns(PGLOBAL, char *, PTOS, bool);
public:
// Constructor
@@ -64,7 +65,7 @@ public:
{return (PTDB)new(g) TDBCSV(g, this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
//virtual bool IsUsingTemp(PGLOBAL g);
virtual int GetBadLines(void) {return (int)Nerr;}
@@ -147,7 +148,7 @@ class DllExport TDBFMT : public TDBCSV {
{return (PTDB)new(g) TDBFMT(g, this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp
index 912e6c7d530..5431e35e0ec 100644
--- a/storage/connect/tabjdbc.cpp
+++ b/storage/connect/tabjdbc.cpp
@@ -1,11 +1,11 @@
/************* TabJDBC C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: TABJDBC */
/* ------------- */
-/* Version 1.1 */
+/* Version 1.2 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -69,9 +69,10 @@
#include "plgdbsem.h"
#include "mycat.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabjdbc.h"
#include "tabmul.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "tabcol.h"
#include "valblk.h"
#include "ha_connect.h"
@@ -96,10 +97,7 @@ bool ExactInfo(void);
/***********************************************************************/
JDBCDEF::JDBCDEF(void)
{
- Driver = Url = Wrapname =Tabname = Tabschema = Username = Colpat = NULL;
- Password = Tabcat = Tabtype = Srcdef = Qchar = Qrystr = Sep = NULL;
- Options = Quoted = Maxerr = Maxres = Memory = 0;
- Scrollable = Xsrc = false;
+ Driver = Url = Wrapname = NULL;
} // end of JDBCDEF constructor
/***********************************************************************/
@@ -134,23 +132,26 @@ bool JDBCDEF::SetParms(PJPARM sjp)
int JDBCDEF::ParseURL(PGLOBAL g, char *url, bool b)
{
if (strncmp(url, "jdbc:", 5)) {
+ PSZ p;
+
// No "jdbc:" in connection string. Must be a straight
// "server" or "server/table"
// ok, so we do a little parsing, but not completely!
- if ((Tabname= strchr(url, '/'))) {
+ if ((p = strchr(url, '/'))) {
// If there is a single '/' in the connection string,
// this means the user is specifying a table name
- *Tabname++= '\0';
+ *p++= '\0';
// there better not be any more '/'s !
- if (strchr(Tabname, '/'))
+ if (strchr(p, '/'))
return RC_FX;
- } else if (b) {
- // Otherwise, straight server name,
- Tabname = GetStringCatInfo(g, "Name", NULL);
- Tabname = GetStringCatInfo(g, "Tabname", Tabname);
- } // endelse
+ Tabname = p;
+// } else if (b) {
+// // Otherwise, straight server name,
+// Tabname = GetStringCatInfo(g, "Name", NULL);
+// Tabname = GetStringCatInfo(g, "Tabname", Tabname);
+ } // endif
if (trace)
htrc("server: %s Tabname: %s", url, Tabname);
@@ -204,6 +205,9 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
int rc = RC_OK;
+ if (EXTDEF::DefineAM(g, am, poff))
+ return true;
+
Driver = GetStringCatInfo(g, "Driver", NULL);
Desc = Url = GetStringCatInfo(g, "Connect", NULL);
@@ -223,41 +227,41 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
if (rc == RC_FX) // Error
return true;
- else if (rc == RC_OK) { // Url was not a server name
- Tabname = GetStringCatInfo(g, "Name",
- (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
- Tabname = GetStringCatInfo(g, "Tabname", Tabname);
- Username = GetStringCatInfo(g, "User", NULL);
- Password = GetStringCatInfo(g, "Password", NULL);
- } // endif rc
+//else if (rc == RC_OK) { // Url was not a server name
+// Tabname = GetStringCatInfo(g, "Name",
+// (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
+// Tabname = GetStringCatInfo(g, "Tabname", Tabname);
+// Username = GetStringCatInfo(g, "User", NULL);
+// Password = GetStringCatInfo(g, "Password", NULL);
+//} // endif rc
- if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
- Read_Only = true;
+//if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
+// Read_Only = true;
Wrapname = GetStringCatInfo(g, "Wrapper", NULL);
//Prop = GetStringCatInfo(g, "Properties", NULL);
- Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
- Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
- Tabschema = GetStringCatInfo(g, "Dbname", NULL);
- Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
-
- if (Catfunc == FNC_COL)
- Colpat = GetStringCatInfo(g, "Colpat", NULL);
-
- if (Catfunc == FNC_TABLE)
- Tabtype = GetStringCatInfo(g, "Tabtype", NULL);
-
- Qrystr = GetStringCatInfo(g, "Query_String", "?");
- Sep = GetStringCatInfo(g, "Separator", NULL);
- Xsrc = GetBoolCatInfo("Execsrc", FALSE);
- Maxerr = GetIntCatInfo("Maxerr", 0);
- Maxres = GetIntCatInfo("Maxres", 0);
- Quoted = GetIntCatInfo("Quoted", 0);
-//Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT);
-//Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
- Scrollable = GetBoolCatInfo("Scrollable", false);
- Memory = GetIntCatInfo("Memory", 0);
- Pseudo = 2; // FILID is Ok but not ROWID
+//Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
+//Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
+//Tabschema = GetStringCatInfo(g, "Dbname", NULL);
+//Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
+
+//if (Catfunc == FNC_COL)
+// Colpat = GetStringCatInfo(g, "Colpat", NULL);
+
+//if (Catfunc == FNC_TABLE)
+// Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
+
+//Qrystr = GetStringCatInfo(g, "Query_String", "?");
+//Sep = GetStringCatInfo(g, "Separator", NULL);
+//Xsrc = GetBoolCatInfo("Execsrc", FALSE);
+//Maxerr = GetIntCatInfo("Maxerr", 0);
+//Maxres = GetIntCatInfo("Maxres", 0);
+//Quoted = GetIntCatInfo("Quoted", 0);
+// Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT);
+// Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
+//Scrollable = GetBoolCatInfo("Scrollable", false);
+//Memory = GetIntCatInfo("Memory", 0);
+//Pseudo = 2; // FILID is Ok but not ROWID
return false;
} // end of DefineAM
@@ -266,7 +270,7 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
/***********************************************************************/
PTDB JDBCDEF::GetTable(PGLOBAL g, MODE m)
{
- PTDBASE tdbp = NULL;
+ PTDB tdbp = NULL;
/*********************************************************************/
/* Allocate a TDB of the proper type. */
@@ -326,7 +330,7 @@ int JDBCPARM::CheckSize(int rows)
/***********************************************************************/
/* Implementation of the TDBJDBC class. */
/***********************************************************************/
-TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBASE(tdp)
+TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBEXT(tdp)
{
Jcp = NULL;
Cnp = NULL;
@@ -335,101 +339,45 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBASE(tdp)
Ops.Driver = tdp->Driver;
Ops.Url = tdp->Url;
WrapName = tdp->Wrapname;
- TableName = tdp->Tabname;
- Schema = tdp->Tabschema;
Ops.User = tdp->Username;
Ops.Pwd = tdp->Password;
// Ops.Properties = tdp->Prop;
- Catalog = tdp->Tabcat;
- Srcdef = tdp->Srcdef;
- Qrystr = tdp->Qrystr;
- Sep = tdp->GetSep();
- Options = tdp->Options;
// Ops.Cto = tdp->Cto;
// Ops.Qto = tdp->Qto;
- Quoted = MY_MAX(0, tdp->GetQuoted());
- Rows = tdp->GetElemt();
- Memory = tdp->Memory;
Ops.Scrollable = tdp->Scrollable;
} else {
WrapName = NULL;
- TableName = NULL;
- Schema = NULL;
Ops.Driver = NULL;
Ops.Url = NULL;
Ops.User = NULL;
Ops.Pwd = NULL;
// Ops.Properties = NULL;
- Catalog = NULL;
- Srcdef = NULL;
- Qrystr = NULL;
- Sep = 0;
- Options = 0;
// Ops.Cto = DEFAULT_LOGIN_TIMEOUT;
// Ops.Qto = DEFAULT_QUERY_TIMEOUT;
- Quoted = 0;
- Rows = 0;
- Memory = 0;
Ops.Scrollable = false;
} // endif tdp
- Quote = NULL;
- Query = NULL;
- Count = NULL;
-//Where = NULL;
- MulConn = NULL;
- DBQ = NULL;
- Qrp = NULL;
- Fpos = 0;
- Curpos = 0;
- AftRows = 0;
- CurNum = 0;
- Rbuf = 0;
- BufSize = 0;
- Ncol = 0;
- Nparm = 0;
- Placed = false;
+//Ncol = 0;
Prepared = false;
Werr = false;
Rerr = false;
Ops.Fsize = Ops.CheckSize(Rows);
} // end of TDBJDBC standard constructor
-TDBJDBC::TDBJDBC(PTDBJDBC tdbp) : TDBASE(tdbp)
+TDBJDBC::TDBJDBC(PTDBJDBC tdbp) : TDBEXT(tdbp)
{
Jcp = tdbp->Jcp; // is that right ?
Cnp = tdbp->Cnp;
WrapName = tdbp->WrapName;
- TableName = tdbp->TableName;
- Schema = tdbp->Schema;
Ops = tdbp->Ops;
- Catalog = tdbp->Catalog;
- Srcdef = tdbp->Srcdef;
- Qrystr = tdbp->Qrystr;
- Memory = tdbp->Memory;
-//Scrollable = tdbp->Scrollable;
- Quote = tdbp->Quote;
- Query = tdbp->Query;
- Count = tdbp->Count;
-//Where = tdbp->Where;
- MulConn = tdbp->MulConn;
- DBQ = tdbp->DBQ;
- Options = tdbp->Options;
- Quoted = tdbp->Quoted;
- Rows = tdbp->Rows;
- Fpos = 0;
- Curpos = 0;
- AftRows = 0;
- CurNum = 0;
- Rbuf = 0;
- BufSize = tdbp->BufSize;
- Nparm = tdbp->Nparm;
- Qrp = tdbp->Qrp;
- Placed = false;
+//Ncol = tdbp->Ncol;
+ Prepared = tdbp->Prepared;
+ Werr = tdbp->Werr;
+ Rerr = tdbp->Rerr;
} // end of TDBJDBC copy constructor
// Method
-PTDB TDBJDBC::CopyOne(PTABS t)
+PTDB TDBJDBC::Clone(PTABS t)
{
PTDB tp;
PJDBCCOL cp1, cp2;
@@ -443,7 +391,7 @@ PTDB TDBJDBC::CopyOne(PTABS t)
} // endfor cp1
return tp;
-} // end of CopyOne
+} // end of Clone
/***********************************************************************/
/* Allocate JDBC column description block. */
@@ -453,134 +401,6 @@ PCOL TDBJDBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
return new(g)JDBCCOL(cdp, this, cprec, n);
} // end of MakeCol
-/******************************************************************/
-/* Convert an UTF-8 string to latin characters. */
-/******************************************************************/
-int TDBJDBC::Decode(char *txt, char *buf, size_t n)
-{
- uint dummy_errors;
- uint32 len= copy_and_convert(buf, n, &my_charset_latin1,
- txt, strlen(txt),
- &my_charset_utf8_general_ci,
- &dummy_errors);
- buf[len]= '\0';
- return 0;
-} // end of Decode
-
-/***********************************************************************/
-/* MakeSQL: make the SQL statement use with JDBC connection. */
-/* TODO: when implementing EOM filtering, column only used in local */
-/* filter should be removed from column list. */
-/***********************************************************************/
-bool TDBJDBC::MakeSQL(PGLOBAL g, bool cnt)
-{
- char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3];
- int len;
- bool oom = false, first = true;
- PTABLE tablep = To_Table;
- PCOL colp;
-
- if (Srcdef) {
- Query = new(g)STRING(g, 0, Srcdef);
- return false;
- } // endif Srcdef
-
- // Allocate the string used to contain the Query
- Query = new(g)STRING(g, 1023, "SELECT ");
-
- if (!cnt) {
- if (Columns) {
- // Normal SQL statement to retrieve results
- for (colp = Columns; colp; colp = colp->GetNext())
- if (!colp->IsSpecial()) {
- if (!first)
- oom |= Query->Append(", ");
- else
- first = false;
-
- // Column name can be encoded in UTF-8
- Decode(colp->GetName(), buf, sizeof(buf));
-
- if (Quote) {
- // Put column name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
- } else
- oom |= Query->Append(buf);
-
- ((PJDBCCOL)colp)->Rank = ++Ncol;
- } // endif colp
-
- } else
- // !Columns can occur for queries such that sql count(*) from...
- // for which we will count the rows from sql * from...
- oom |= Query->Append('*');
-
- } else
- // SQL statement used to retrieve the size of the result
- oom |= Query->Append("count(*)");
-
- oom |= Query->Append(" FROM ");
-
- if (Catalog && *Catalog)
- catp = Catalog;
-
- //if (tablep->GetSchema())
- // schmp = (char*)tablep->GetSchema();
- //else
- if (Schema && *Schema)
- schmp = Schema;
-
- if (catp) {
- oom |= Query->Append(catp);
-
- if (schmp) {
- oom |= Query->Append('.');
- oom |= Query->Append(schmp);
- } // endif schmp
-
- oom |= Query->Append('.');
- } else if (schmp) {
- oom |= Query->Append(schmp);
- oom |= Query->Append('.');
- } // endif schmp
-
- // Table name can be encoded in UTF-8
- Decode(TableName, buf, sizeof(buf));
-
- if (Quote) {
- // Put table name between identifier quotes in case in contains blanks
- oom |= Query->Append(Quote);
- oom |= Query->Append(buf);
- oom |= Query->Append(Quote);
- } else
- oom |= Query->Append(buf);
-
- len = Query->GetLength();
-
- if (To_CondFil) {
- if (Mode == MODE_READ) {
- oom |= Query->Append(" WHERE ");
- oom |= Query->Append(To_CondFil->Body);
- len = Query->GetLength() + 1;
- } else
- len += (strlen(To_CondFil->Body) + 256);
-
- } else
- len += ((Mode == MODE_READX) ? 256 : 1);
-
- if (oom || Query->Resize(len)) {
- strcpy(g->Message, "MakeSQL: Out of memory");
- return true;
- } // endif oom
-
- if (trace)
- htrc("Query=%s\n", Query->GetStr());
-
- return false;
-} // end of MakeSQL
-
/***********************************************************************/
/* MakeInsert: make the Insert statement used with JDBC connection. */
/***********************************************************************/
@@ -601,7 +421,7 @@ bool TDBJDBC::MakeInsert(PGLOBAL g)
// Column name can be encoded in UTF-8
Decode(colp->GetName(), buf, sizeof(buf));
len += (strlen(buf) + 6); // comma + quotes + valist
- ((PJDBCCOL)colp)->Rank = ++Nparm;
+ ((PEXTCOL)colp)->SetRank(++Nparm);
} // endif colp
// Below 32 is enough to contain the fixed part of the query
@@ -711,76 +531,6 @@ bool TDBJDBC::SetParameters(PGLOBAL g)
} // end of SetParameters
/***********************************************************************/
-/* MakeCommand: make the Update or Delete statement to send to the */
-/* MySQL server. Limited to remote values and filtering. */
-/***********************************************************************/
-bool TDBJDBC::MakeCommand(PGLOBAL g)
-{
- char *p, *stmt, name[68], *body = NULL, *qc = Jcp->GetQuoteChar();
- char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1);
- bool qtd = Quoted > 0;
- int i = 0, k = 0;
-
- // Make a lower case copy of the originale query and change
- // back ticks to the data source identifier quoting character
- do {
- qrystr[i] = (Qrystr[i] == '`') ? *qc : tolower(Qrystr[i]);
- } while (Qrystr[i++]);
-
- if (To_CondFil && (p = strstr(qrystr, " where "))) {
- p[7] = 0; // Remove where clause
- Qrystr[(p - qrystr) + 7] = 0;
- body = To_CondFil->Body;
- stmt = (char*)PlugSubAlloc(g, NULL, strlen(qrystr)
- + strlen(body) + 64);
- } else
- stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64);
-
- // Check whether the table name is equal to a keyword
- // If so, it must be quoted in the original query
- strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
-
- if (strstr(" update delete low_priority ignore quick from ", name)) {
- strlwr(strcat(strcat(strcpy(name, qc), Name), qc));
- k += 2;
- } else
- strlwr(strcpy(name, Name)); // Not a keyword
-
- if ((p = strstr(qrystr, name))) {
- for (i = 0; i < p - qrystr; i++)
- stmt[i] = (Qrystr[i] == '`') ? *qc : Qrystr[i];
-
- stmt[i] = 0;
- k += i + (int)strlen(Name);
-
- if (qtd && *(p-1) == ' ')
- strcat(strcat(strcat(stmt, qc), TableName), qc);
- else
- strcat(stmt, TableName);
-
- i = (int)strlen(stmt);
-
- do {
- stmt[i++] = (Qrystr[k] == '`') ? *qc : Qrystr[k];
- } while (Qrystr[k++]);
-
- if (body)
- strcat(stmt, body);
-
- } else {
- sprintf(g->Message, "Cannot use this %s command",
- (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
- return NULL;
- } // endif p
-
- if (trace)
- htrc("Command=%s\n", stmt);
-
- Query = new(g)STRING(g, 0, stmt);
- return (!Query->GetSize());
-} // end of MakeCommand
-
-/***********************************************************************/
/* ResetSize: call by TDBMUL when calculating size estimate. */
/***********************************************************************/
void TDBJDBC::ResetSize(void)
@@ -834,33 +584,6 @@ int TDBJDBC::Cardinality(PGLOBAL g)
} // end of Cardinality
/***********************************************************************/
-/* JDBC GetMaxSize: returns table size estimate in number of lines. */
-/***********************************************************************/
-int TDBJDBC::GetMaxSize(PGLOBAL g)
-{
- if (MaxSize < 0) {
- if (Mode == MODE_DELETE)
- // Return 0 in mode DELETE in case of delete all.
- MaxSize = 0;
- else if (!Cardinality(NULL))
- MaxSize = 10; // To make MySQL happy
- else if ((MaxSize = Cardinality(g)) < 0)
- MaxSize = 12; // So we can see an error occured
-
- } // endif MaxSize
-
- return MaxSize;
-} // end of GetMaxSize
-
-/***********************************************************************/
-/* Return max size value. */
-/***********************************************************************/
-int TDBJDBC::GetProgMax(PGLOBAL g)
-{
- return GetMaxSize(g);
-} // end of GetProgMax
-
-/***********************************************************************/
/* JDBC Access Method opening routine. */
/* New method now that this routine is called recursively (last table */
/* first in reverse order): index blocks are immediately linked to */
@@ -997,6 +720,7 @@ bool TDBJDBC::OpenDB(PGLOBAL g)
return false;
} // end of OpenDB
+#if 0
/***********************************************************************/
/* GetRecpos: return the position of last read record. */
/***********************************************************************/
@@ -1004,6 +728,7 @@ int TDBJDBC::GetRecpos(void)
{
return Fpos;
} // end of GetRecpos
+#endif // 0
/***********************************************************************/
/* SetRecpos: set the position of next read record. */
@@ -1105,8 +830,7 @@ int TDBJDBC::ReadDB(PGLOBAL g)
int rc;
if (trace > 1)
- htrc("JDBC ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n",
- GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex);
+ htrc("JDBC ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode);
if (Mode == MODE_UPDATE || Mode == MODE_DELETE) {
if (!Query && MakeCommand(g))
@@ -1125,12 +849,6 @@ int TDBJDBC::ReadDB(PGLOBAL g)
} // endif Mode
- if (To_Kindex) {
- // Direct access of JDBC tables is not implemented
- strcpy(g->Message, "No JDBC direct access");
- return RC_FX;
- } // endif To_Kindex
-
/*********************************************************************/
/* Now start the reading process. */
/* Here is the place to fetch the line(s). */
@@ -1302,70 +1020,26 @@ void TDBJDBC::CloseDB(PGLOBAL g)
/* JDBCCOL public constructor. */
/***********************************************************************/
JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
- : COLBLK(cdp, tdbp, i)
+ : EXTCOL(cdp, tdbp, cprec, i, am)
{
- if (cprec) {
- Next = cprec->GetNext();
- cprec->SetNext(this);
- } else {
- Next = tdbp->GetColumns();
- tdbp->SetColumns(this);
- } // endif cprec
-
- // Set additional JDBC access method information for column.
- Crp = NULL;
- //Long = cdp->GetLong();
- Long = Precision;
- //strcpy(F_Date, cdp->F_Date);
- To_Val = NULL;
-//Slen = 0;
-//StrLen = &Slen;
-//Sqlbuf = NULL;
- Bufp = NULL;
- Blkp = NULL;
- Rank = 0; // Not known yet
-
- if (trace)
- htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this);
-
} // end of JDBCCOL constructor
/***********************************************************************/
/* JDBCCOL private constructor. */
/***********************************************************************/
-JDBCCOL::JDBCCOL(void) : COLBLK()
+JDBCCOL::JDBCCOL(void) : EXTCOL()
{
- Crp = NULL;
- Buf_Type = TYPE_INT; // This is a count(*) column
- // Set additional Dos access method information for column.
- Long = sizeof(int);
- To_Val = NULL;
-//Slen = 0;
-//StrLen = &Slen;
-//Sqlbuf = NULL;
- Bufp = NULL;
- Blkp = NULL;
- Rank = 1;
} // end of JDBCCOL constructor
/***********************************************************************/
/* JDBCCOL constructor used for copying columns. */
/* tdbp is the pointer to the new table descriptor. */
/***********************************************************************/
-JDBCCOL::JDBCCOL(JDBCCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp)
+JDBCCOL::JDBCCOL(JDBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp)
{
- Crp = col1->Crp;
- Long = col1->Long;
- //strcpy(F_Date, col1->F_Date);
- To_Val = col1->To_Val;
-//Slen = col1->Slen;
-//StrLen = col1->StrLen;
-//Sqlbuf = col1->Sqlbuf;
- Bufp = col1->Bufp;
- Blkp = col1->Blkp;
- Rank = col1->Rank;
} // end of JDBCCOL copy constructor
+#if 0
/***********************************************************************/
/* SetBuffer: prepare a column block for write operation. */
/***********************************************************************/
@@ -1411,6 +1085,7 @@ bool JDBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
Status = (ok) ? BUF_EMPTY : BUF_NO;
return false;
} // end of SetBuffer
+#endif // 0
/***********************************************************************/
/* ReadColumn: when SQLFetch is used there is nothing to do as the */
@@ -1456,72 +1131,8 @@ void JDBCCOL::ReadColumn(PGLOBAL g)
} // end of ReadColumn
-#if 0
/***********************************************************************/
-/* AllocateBuffers: allocate the extended buffer for SQLExtendedFetch */
-/* or Fetch. Note: we use Long+1 here because JDBC must have space */
-/* for the ending null character. */
-/***********************************************************************/
-void JDBCCOL::AllocateBuffers(PGLOBAL g, int rows)
-{
- if (Buf_Type == TYPE_DATE)
- Sqlbuf = (TIMESTAMP_STRUCT*)PlugSubAlloc(g, NULL,
- sizeof(TIMESTAMP_STRUCT));
-
- if (!rows)
- return;
-
- if (Buf_Type == TYPE_DATE)
- Bufp = PlugSubAlloc(g, NULL, rows * sizeof(TIMESTAMP_STRUCT));
- else {
- Blkp = AllocValBlock(g, NULL, Buf_Type, rows, GetBuflen(),
- GetScale(), true, false, false);
- Bufp = Blkp->GetValPointer();
- } // endelse
-
- if (rows > 1)
- StrLen = (SQLLEN *)PlugSubAlloc(g, NULL, rows * sizeof(SQLLEN));
-
-} // end of AllocateBuffers
-
-/***********************************************************************/
-/* Returns the buffer to use for Fetch or Extended Fetch. */
-/***********************************************************************/
-void *JDBCCOL::GetBuffer(DWORD rows)
-{
- if (rows && To_Tdb) {
- assert(rows == (DWORD)((TDBJDBC*)To_Tdb)->Rows);
- return Bufp;
- } else
- return (Buf_Type == TYPE_DATE) ? Sqlbuf : Value->GetTo_Val();
-
-} // end of GetBuffer
-
-/***********************************************************************/
-/* Returns the buffer length to use for Fetch or Extended Fetch. */
-/***********************************************************************/
-SWORD JDBCCOL::GetBuflen(void)
-{
- SWORD flen;
-
- switch (Buf_Type) {
- case TYPE_DATE:
- flen = (SWORD)sizeof(TIMESTAMP_STRUCT);
- break;
- case TYPE_STRING:
- case TYPE_DECIM:
- flen = (SWORD)Value->GetClen() + 1;
- break;
- default:
- flen = (SWORD)Value->GetClen();
- } // endswitch Buf_Type
-
- return flen;
-} // end of GetBuflen
-#endif // 0
-
-/***********************************************************************/
-/* WriteColumn: make sure the bind buffer is updated. */
+/* WriteColumn: Convert if necessary. */
/***********************************************************************/
void JDBCCOL::WriteColumn(PGLOBAL g)
{
@@ -1531,30 +1142,6 @@ void JDBCCOL::WriteColumn(PGLOBAL g)
if (Value != To_Val)
Value->SetValue_pval(To_Val, FALSE); // Convert the inserted value
-#if 0
- if (Buf_Type == TYPE_DATE) {
- struct tm tm, *dbtime = ((DTVAL*)Value)->GetGmTime(&tm);
-
- Sqlbuf->second = dbtime->tm_sec;
- Sqlbuf->minute = dbtime->tm_min;
- Sqlbuf->hour = dbtime->tm_hour;
- Sqlbuf->day = dbtime->tm_mday;
- Sqlbuf->month = dbtime->tm_mon + 1;
- Sqlbuf->year = dbtime->tm_year + 1900;
- Sqlbuf->fraction = 0;
- } else if (Buf_Type == TYPE_DECIM) {
- // Some data sources require local decimal separator
- char *p, sep = ((PTDBJDBC)To_Tdb)->Sep;
-
- if (sep && (p = strchr(Value->GetCharValue(), '.')))
- *p = sep;
-
- } // endif Buf_Type
-
- if (Nullable)
- *StrLen = (Value->IsNull()) ? SQL_NULL_DATA :
- (IsTypeChar(Buf_Type)) ? SQL_NTS : 0;
-#endif // 0
} // end of WriteColumn
/* -------------------------- Class TDBXJDC -------------------------- */
@@ -1795,7 +1382,7 @@ TDBJTB::TDBJTB(PJDBCDEF tdp) : TDBJDRV(tdp)
{
Schema = tdp->Tabschema;
Tab = tdp->Tabname;
- Tabtype = tdp->Tabtype;
+ Tabtype = tdp->Tabtyp;
Ops.Driver = tdp->Driver;
Ops.Url = tdp->Url;
Ops.User = tdp->Username;
diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h
index fee8223abaf..46d2073e923 100644
--- a/storage/connect/tabjdbc.h
+++ b/storage/connect/tabjdbc.h
@@ -21,7 +21,7 @@ typedef class JSRCCOL *PJSRCCOL;
/***********************************************************************/
/* JDBC table. */
/***********************************************************************/
-class DllExport JDBCDEF : public TABDEF { /* Logical table description */
+class DllExport JDBCDEF : public EXTDEF { /* Logical table description */
friend class TDBJDBC;
friend class TDBXJDC;
friend class TDBJDRV;
@@ -33,17 +33,8 @@ public:
// Implementation
virtual const char *GetType(void) { return "JDBC"; }
- PSZ GetTabname(void) { return Tabname; }
- PSZ GetTabschema(void) { return Tabschema; }
- PSZ GetTabcat(void) { return Tabcat; }
- PSZ GetSrcdef(void) { return Srcdef; }
- char GetSep(void) { return (Sep) ? *Sep : 0; }
- int GetQuoted(void) { return Quoted; }
-//int GetCatver(void) { return Catver; }
- int GetOptions(void) { return Options; }
// Methods
- virtual int Indexable(void) { return 2; }
virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
virtual PTDB GetTable(PGLOBAL g, MODE m);
int ParseURL(PGLOBAL g, char *url, bool b = true);
@@ -53,28 +44,7 @@ protected:
// Members
PSZ Driver; /* JDBC driver */
PSZ Url; /* JDBC driver URL */
- PSZ Tabname; /* External table name */
PSZ Wrapname; /* Java wrapper name */
- PSZ Tabschema; /* External table schema */
- PSZ Username; /* User connect name */
- PSZ Password; /* Password connect info */
-//PSZ Prop; /* Connection Properties */
- PSZ Tabcat; /* External table catalog */
- PSZ Tabtype; /* External table type */
- PSZ Colpat; /* Catalog column pattern */
- PSZ Srcdef; /* The source table SQL definition */
- PSZ Qchar; /* Identifier quoting character */
- PSZ Qrystr; /* The original query */
- PSZ Sep; /* Decimal separator */
- int Options; /* Open connection options */
-//int Cto; /* Open connection timeout */
-//int Qto; /* Query (command) timeout */
- int Quoted; /* Identifier quoting level */
- int Maxerr; /* Maxerr for an Exec table */
- int Maxres; /* Maxres for a catalog table */
- int Memory; /* Put result set in memory */
- bool Scrollable; /* Use scrollable cursor */
- bool Xsrc; /* Execution type */
}; // end of JDBCDEF
#if !defined(NJDBC)
@@ -84,34 +54,34 @@ protected:
/* This is the JDBC Access Method class declaration for files from */
/* other DB drivers to be accessed via JDBC. */
/***********************************************************************/
-class TDBJDBC : public TDBASE {
+class TDBJDBC : public TDBEXT {
friend class JDBCCOL;
friend class JDBConn;
public:
// Constructor
TDBJDBC(PJDBCDEF tdp = NULL);
- TDBJDBC(PTDBJDBC tdbp);
+ TDBJDBC(PTDBJDBC tdbp);
// Implementation
- virtual AMT GetAmType(void) { return TYPE_AM_JDBC; }
- virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g)TDBJDBC(this); }
+ virtual AMT GetAmType(void) {return TYPE_AM_JDBC;}
+ virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBJDBC(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
- virtual int GetRecpos(void);
+ virtual PTDB Clone(PTABS t);
+//virtual int GetRecpos(void);
virtual bool SetRecpos(PGLOBAL g, int recpos);
//virtual PSZ GetFile(PGLOBAL g);
//virtual void SetFile(PGLOBAL g, PSZ fn);
virtual void ResetSize(void);
- //virtual int GetAffectedRows(void) {return AftRows;}
+//virtual int GetAffectedRows(void) {return AftRows;}
virtual PSZ GetServer(void) { return "JDBC"; }
virtual int Indexable(void) { return 2; }
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual int Cardinality(PGLOBAL g);
- virtual int GetMaxSize(PGLOBAL g);
- virtual int GetProgMax(PGLOBAL g);
+//virtual int GetMaxSize(PGLOBAL g);
+//virtual int GetProgMax(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -121,97 +91,50 @@ public:
protected:
// Internal functions
- int Decode(char *utf, char *buf, size_t n);
- bool MakeSQL(PGLOBAL g, bool cnt);
+//int Decode(char *utf, char *buf, size_t n);
+//bool MakeSQL(PGLOBAL g, bool cnt);
bool MakeInsert(PGLOBAL g);
- bool MakeCommand(PGLOBAL g);
- //bool MakeFilter(PGLOBAL g, bool c);
+//virtual bool MakeCommand(PGLOBAL g);
+//bool MakeFilter(PGLOBAL g, bool c);
bool SetParameters(PGLOBAL g);
- //char *MakeUpdate(PGLOBAL g);
- //char *MakeDelete(PGLOBAL g);
+//char *MakeUpdate(PGLOBAL g);
+//char *MakeDelete(PGLOBAL g);
// Members
JDBConn *Jcp; // Points to a JDBC connection class
JDBCCOL *Cnp; // Points to count(*) column
JDBCPARM Ops; // Additional parameters
- PSTRG Query; // Constructed SQL query
char *WrapName; // Points to Java wrapper name
- char *TableName; // Points to JDBC table name
- char *Schema; // Points to JDBC table Schema
- char *User; // User connect info
- char *Pwd; // Password connect info
- char *Catalog; // Points to JDBC table Catalog
- char *Srcdef; // The source table SQL definition
- char *Count; // Points to count(*) SQL statement
-//char *Where; // Points to local where clause
- char *Quote; // The identifier quoting character
- char *MulConn; // Used for multiple JDBC tables
- char *DBQ; // The address part of Connect string
- char *Qrystr; // The original query
- char Sep; // The decimal separator
- int Options; // Connect options
-//int Cto; // Connect timeout
-//int Qto; // Query timeout
- int Quoted; // The identifier quoting level
- int Fpos; // Position of last read record
- int Curpos; // Cursor position of last fetch
- int AftRows; // The number of affected rows
- int Rows; // Rowset size
- int CurNum; // Current buffer line number
- int Rbuf; // Number of lines read in buffer
- int BufSize; // Size of connect string buffer
- int Ncol; // The column number
- int Nparm; // The number of statement parameters
- int Memory; // 0: No 1: Alloc 2: Put 3: Get
-//bool Scrollable; // Use scrollable cursor --> in Ops
- bool Placed; // True for position reading
+//int Ncol; // The column number
bool Prepared; // True when using prepared statement
bool Werr; // Write error
bool Rerr; // Rewind error
- PQRYRES Qrp; // Points to storage result
}; // end of class TDBJDBC
/***********************************************************************/
/* Class JDBCCOL: JDBC access method column descriptor. */
/* This A.M. is used for JDBC tables. */
/***********************************************************************/
-class JDBCCOL : public COLBLK {
+class JDBCCOL : public EXTCOL {
friend class TDBJDBC;
public:
// Constructors
JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC");
- JDBCCOL(JDBCCOL *colp, PTDB tdbp); // Constructor used in copy process
+ JDBCCOL(JDBCCOL *colp, PTDB tdbp); // Constructor used in copy process
// Implementation
- virtual int GetAmType(void) { return TYPE_AM_JDBC; }
-//SQLLEN *GetStrLen(void) { return StrLen; }
- int GetRank(void) { return Rank; }
-//PVBLK GetBlkp(void) {return Blkp;}
- void SetCrp(PCOLRES crp) { Crp = crp; }
+ virtual int GetAmType(void) { return TYPE_AM_JDBC; }
// Methods
- virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
- virtual void ReadColumn(PGLOBAL g);
- virtual void WriteColumn(PGLOBAL g);
-//void AllocateBuffers(PGLOBAL g, int rows);
-//void *GetBuffer(DWORD rows);
-//SWORD GetBuflen(void);
- // void Print(PGLOBAL g, FILE *, uint);
+//virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
protected:
- // Constructor used by GetMaxSize
- JDBCCOL(void);
+ // Constructor for count(*) column
+ JDBCCOL(void);
// Members
- //TIMESTAMP_STRUCT *Sqlbuf; // To get SQL_TIMESTAMP's
- PCOLRES Crp; // To storage result
- void *Bufp; // To extended buffer
- PVBLK Blkp; // To Value Block
- //char F_Date[12]; // Internal Date format
- PVAL To_Val; // To value used for Insert
-//SQLLEN *StrLen; // As returned by JDBC
-//SQLLEN Slen; // Used with Fetch
- int Rank; // Rank (position) number in the query
}; // end of class JDBCCOL
/***********************************************************************/
@@ -268,7 +191,7 @@ public:
JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC");
// Implementation
- //virtual int GetAmType(void) {return TYPE_AM_JDBC;}
+ virtual int GetAmType(void) {return TYPE_AM_JDBC;}
// Methods
virtual void ReadColumn(PGLOBAL g);
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index 1b9ce8b64c9..1e11d454cfc 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -129,7 +129,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info)
if (tdp->Pretty == 2) {
if (tdp->Zipped) {
#if defined(ZIP_SUPPORT)
- tjsp = new(g) TDBJSON(tdp, new(g) ZIPFAM(tdp));
+ tjsp = new(g) TDBJSON(tdp, new(g) UNZFAM(tdp));
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
@@ -151,7 +151,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info)
if (tdp->Zipped) {
#if defined(ZIP_SUPPORT)
- tjnp = new(g)TDBJSN(tdp, new(g)ZIPFAM(tdp));
+ tjnp = new(g)TDBJSN(tdp, new(g)UNZFAM(tdp));
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
@@ -441,7 +441,14 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
if (Zipped) {
#if defined(ZIP_SUPPORT)
- txfp = new(g) ZIPFAM(this);
+ if (m == MODE_READ || m == MODE_UPDATE) {
+ txfp = new(g) UNZFAM(this);
+ } else if (m == MODE_INSERT) {
+ txfp = new(g) ZIPFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's m
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
@@ -479,7 +486,15 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
} else {
if (Zipped) {
#if defined(ZIP_SUPPORT)
- txfp = new(g)ZIPFAM(this);
+ if (m == MODE_READ || m == MODE_UPDATE) {
+ txfp = new(g) UNZFAM(this);
+ } else if (m == MODE_INSERT) {
+ strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0");
+ return NULL;
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's m
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
@@ -559,7 +574,7 @@ TDBJSN::TDBJSN(TDBJSN *tdbp) : TDBDOS(NULL, tdbp)
} // end of TDBJSN copy constructor
// Used for update
-PTDB TDBJSN::CopyOne(PTABS t)
+PTDB TDBJSN::Clone(PTABS t)
{
G = NULL;
PTDB tp;
@@ -574,7 +589,7 @@ PTDB TDBJSN::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate JSN column description block. */
@@ -1563,7 +1578,7 @@ TDBJSON::TDBJSON(PJTDB tdbp) : TDBJSN(tdbp)
} // end of TDBJSON copy constructor
// Used for update
-PTDB TDBJSON::CopyOne(PTABS t)
+PTDB TDBJSON::Clone(PTABS t)
{
PTDB tp;
PJCOL cp1, cp2;
@@ -1577,7 +1592,7 @@ PTDB TDBJSON::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Make the document tree from the object path. */
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
index c9d30d48f2a..924ce387900 100644
--- a/storage/connect/tabjson.h
+++ b/storage/connect/tabjson.h
@@ -82,7 +82,7 @@ public:
void SetG(PGLOBAL g) {G = g;}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual PCOL InsertSpecialColumn(PCOL colp);
virtual int RowNumber(PGLOBAL g, bool b = FALSE)
@@ -188,7 +188,7 @@ class TDBJSON : public TDBJSN {
PJAR GetDoc(void) {return Doc;}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
// Database routines
virtual int Cardinality(PGLOBAL g);
diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp
index c21bb1660ea..916449be6c6 100644
--- a/storage/connect/table.cpp
+++ b/storage/connect/table.cpp
@@ -1,7 +1,7 @@
/************** Table C++ Functions Source Code File (.CPP) ************/
-/* Name: TABLE.CPP Version 2.7 */
+/* Name: TABLE.CPP Version 2.8 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1999-2016 */
+/* (C) Copyright to the author Olivier BERTRAND 1999-2017 */
/* */
/* This file contains the TBX, TDB and OPJOIN classes functions. */
/***********************************************************************/
@@ -10,6 +10,7 @@
/* Include relevant MariaDB header file. */
/***********************************************************************/
#include "my_global.h"
+#include "sql_string.h"
/***********************************************************************/
/* Include required application header files */
@@ -40,8 +41,9 @@ void AddPointer(PTABS, void *);
/* TDB public constructors. */
/***********************************************************************/
TDB::TDB(PTABDEF tdp) : Tdb_No(++Tnum)
- {
- Use = USE_NO;
+{
+ To_Def = tdp;
+ Use = USE_NO;
To_Orig = NULL;
To_Filter = NULL;
To_CondFil = NULL;
@@ -49,14 +51,20 @@ TDB::TDB(PTABDEF tdp) : Tdb_No(++Tnum)
Name = (tdp) ? tdp->GetName() : NULL;
To_Table = NULL;
Columns = NULL;
- Degree = (tdp) ? tdp->GetDegree() : 0;
+ To_SetCols = NULL;
+ Degree = (tdp) ? tdp->GetDegree() : 0;
Mode = MODE_ANY;
Cardinal = -1;
- } // end of TDB standard constructor
+ MaxSize = -1;
+ Read_Only = (tdp) ? tdp->IsReadOnly() : false;
+ m_data_charset = (tdp) ? tdp->data_charset() : NULL;
+ csname = (tdp) ? tdp->csname : NULL;
+} // end of TDB standard constructor
TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum)
- {
- Use = tdbp->Use;
+{
+ To_Def = tdbp->To_Def;
+ Use = tdbp->Use;
To_Orig = tdbp;
To_Filter = NULL;
To_CondFil = NULL;
@@ -64,12 +72,192 @@ TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum)
Name = tdbp->Name;
To_Table = tdbp->To_Table;
Columns = NULL;
- Degree = tdbp->Degree;
+ To_SetCols = tdbp->To_SetCols; // ???
+ Degree = tdbp->Degree;
Mode = tdbp->Mode;
Cardinal = tdbp->Cardinal;
- } // end of TDB copy constructor
+ MaxSize = tdbp->MaxSize;
+ Read_Only = tdbp->IsReadOnly();
+ m_data_charset = tdbp->data_charset();
+ csname = tdbp->csname;
+} // end of TDB copy constructor
// Methods
+/***********************************************************************/
+/* Return the pointer on the charset of this table. */
+/***********************************************************************/
+CHARSET_INFO *TDB::data_charset(void)
+{
+  // If no DATA_CHARSET is specified, we assume that the character
+  // set of the remote data is the same as the CHARACTER SET
+  // definition of the SQL column.
+ return m_data_charset ? m_data_charset : &my_charset_bin;
+} // end of data_charset
+
+/***********************************************************************/
+/* Return the datapath of the DB this table belongs to. */
+/***********************************************************************/
+PSZ TDB::GetPath(void)
+{
+ return To_Def->GetPath();
+} // end of GetPath
+
+/***********************************************************************/
+/* Return true if name is a special column of this table. */
+/***********************************************************************/
+bool TDB::IsSpecial(PSZ name)
+{
+ for (PCOLDEF cdp = To_Def->GetCols(); cdp; cdp = cdp->GetNext())
+ if (!stricmp(cdp->GetName(), name) && (cdp->Flags & U_SPECIAL))
+ return true; // Special column to ignore while inserting
+
+ return false; // Not found or not special or not inserting
+} // end of IsSpecial
+
+/***********************************************************************/
+/* Initialize TDB based column description block construction. */
+/* name is used to call columns by name. */
+/* num is used by TBL to construct columns by index number. */
+/* Note: name=Null and num=0 for constructing all columns (select *) */
+/***********************************************************************/
+PCOL TDB::ColDB(PGLOBAL g, PSZ name, int num)
+{
+ int i;
+ PCOLDEF cdp;
+ PCOL cp, colp = NULL, cprec = NULL;
+
+ if (trace)
+ htrc("ColDB: am=%d colname=%s tabname=%s num=%d\n",
+ GetAmType(), SVP(name), Name, num);
+
+ for (cdp = To_Def->GetCols(), i = 1; cdp; cdp = cdp->GetNext(), i++)
+ if ((!name && !num) ||
+ (name && !stricmp(cdp->GetName(), name)) || num == i) {
+ /*****************************************************************/
+ /* Check for existence of desired column. */
+ /* Also find where to insert the new block. */
+ /*****************************************************************/
+ for (cp = Columns; cp; cp = cp->GetNext())
+ if ((num && cp->GetIndex() == i) ||
+ (name && !stricmp(cp->GetName(), name)))
+ break; // Found
+ else if (cp->GetIndex() < i)
+ cprec = cp;
+
+ if (trace)
+ htrc("cdp(%d).Name=%s cp=%p\n", i, cdp->GetName(), cp);
+
+ /*****************************************************************/
+ /* Now take care of Column Description Block. */
+ /*****************************************************************/
+ if (cp)
+ colp = cp;
+ else if (!(cdp->Flags & U_SPECIAL))
+ colp = MakeCol(g, cdp, cprec, i);
+ else if (Mode != MODE_INSERT)
+ colp = InsertSpcBlk(g, cdp);
+
+ if (trace)
+ htrc("colp=%p\n", colp);
+
+ if (name || num)
+ break;
+ else if (colp && !colp->IsSpecial())
+ cprec = colp;
+
+ } // endif Name
+
+ return (colp);
+} // end of ColDB
+
+/***********************************************************************/
+/* InsertSpecialColumn: Put a special column ahead of the column list.*/
+/***********************************************************************/
+PCOL TDB::InsertSpecialColumn(PCOL colp)
+{
+ if (!colp->IsSpecial())
+ return NULL;
+
+ colp->SetNext(Columns);
+ Columns = colp;
+ return colp;
+} // end of InsertSpecialColumn
+
+/***********************************************************************/
+/* Make a special COLBLK to insert in a table. */
+/***********************************************************************/
+PCOL TDB::InsertSpcBlk(PGLOBAL g, PCOLDEF cdp)
+{
+ //char *name = cdp->GetName();
+ char *name = cdp->GetFmt();
+ PCOLUMN cp;
+ PCOL colp;
+
+ cp = new(g)COLUMN(cdp->GetName());
+
+ if (!To_Table) {
+ strcpy(g->Message, "Cannot make special column: To_Table is NULL");
+ return NULL;
+ } else
+ cp->SetTo_Table(To_Table);
+
+ if (!stricmp(name, "FILEID") || !stricmp(name, "FDISK") ||
+ !stricmp(name, "FPATH") || !stricmp(name, "FNAME") ||
+ !stricmp(name, "FTYPE") || !stricmp(name, "SERVID")) {
+ if (!To_Def || !(To_Def->GetPseudo() & 2)) {
+ sprintf(g->Message, MSG(BAD_SPEC_COLUMN));
+ return NULL;
+ } // endif Pseudo
+
+ if (!stricmp(name, "FILEID"))
+ colp = new(g)FIDBLK(cp, OP_XX);
+ else if (!stricmp(name, "FDISK"))
+ colp = new(g)FIDBLK(cp, OP_FDISK);
+ else if (!stricmp(name, "FPATH"))
+ colp = new(g)FIDBLK(cp, OP_FPATH);
+ else if (!stricmp(name, "FNAME"))
+ colp = new(g)FIDBLK(cp, OP_FNAME);
+ else if (!stricmp(name, "FTYPE"))
+ colp = new(g)FIDBLK(cp, OP_FTYPE);
+ else
+ colp = new(g)SIDBLK(cp);
+
+ } else if (!stricmp(name, "TABID")) {
+ colp = new(g)TIDBLK(cp);
+ } else if (!stricmp(name, "PARTID")) {
+ colp = new(g)PRTBLK(cp);
+ //} else if (!stricmp(name, "CONID")) {
+ // colp = new(g) CIDBLK(cp);
+ } else if (!stricmp(name, "ROWID")) {
+ colp = new(g)RIDBLK(cp, false);
+ } else if (!stricmp(name, "ROWNUM")) {
+ colp = new(g)RIDBLK(cp, true);
+ } else {
+ sprintf(g->Message, MSG(BAD_SPECIAL_COL), name);
+ return NULL;
+ } // endif's name
+
+ if (!(colp = InsertSpecialColumn(colp))) {
+ sprintf(g->Message, MSG(BAD_SPECIAL_COL), name);
+ return NULL;
+ } // endif Insert
+
+ return (colp);
+} // end of InsertSpcBlk
+
+/***********************************************************************/
+/* Marks DOS/MAP table columns used in internal joins. */
+/* tdb2 is the top of tree or first tdb in chained tdb's and tdbp */
+/* points to the currently marked tdb. */
+/* Two questions here: exact meaning of U_J_INT ? */
+/* Why is the eventual reference to To_Key_Col not marked U_J_EXT ? */
+/***********************************************************************/
+void TDB::MarkDB(PGLOBAL, PTDB tdb2)
+{
+ if (trace)
+ htrc("DOS MarkDB: tdbp=%p tdb2=%p\n", this, tdb2);
+
+} // end of MarkDB
/***********************************************************************/
/* RowNumber: returns the current row ordinal number. */
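This hunk is the core of the refactoring: generic state (To_Def, To_SetCols, MaxSize, Read_Only, m_data_charset, csname) and the generic column machinery (data_charset, GetPath, IsSpecial, ColDB, InsertSpecialColumn, InsertSpcBlk, MarkDB) move up from TDBASE into TDB, so that table types which no longer derive from TDBASE still share them. A rough sketch of how a caller drives ColDB, with error handling reduced to the minimum; the wrapper function is hypothetical (the equivalent loops live on the handler side, not in this file):

bool InstantiateColumns(PGLOBAL g, PTDB tdbp, PSZ *names, int count)
  {
  if (!names || !count)                  // select *: build all columns
    return tdbp->ColDB(g, NULL, 0) == NULL;

  for (int i = 0; i < count; i++)
    if (!tdbp->ColDB(g, names[i], 0))    // build one column by name
      return true;                       // error text is in g->Message

  return false;
  } // end of InstantiateColumns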
@@ -86,7 +274,7 @@ PTDB TDB::Copy(PTABS t)
//PGLOBAL g = t->G; // Is this really useful ???
for (tdb1 = this; tdb1; tdb1 = tdb1->Next) {
- tp = tdb1->CopyOne(t);
+ tp = tdb1->Clone(t);
if (!outp)
outp = tp;
@@ -100,6 +288,15 @@ PTDB TDB::Copy(PTABS t)
return outp;
} // end of Copy
+/***********************************************************************/
+/* SetRecpos: Replace the table at the specified position. */
+/***********************************************************************/
+bool TDB::SetRecpos(PGLOBAL g, int)
+{
+ strcpy(g->Message, MSG(SETRECPOS_NIY));
+ return true;
+} // end of SetRecpos
+
void TDB::Print(PGLOBAL g, FILE *f, uint n)
{
PCOL cp;
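TDB also gains a default SetRecpos that simply fails with SETRECPOS_NIY, so only table types that really support repositioning have to override it. A sketch of such an override, using CONNECT's convention that bool routines return true on error; TDBSAMPLE and its Fpos member are illustrative only:

bool TDBSAMPLE::SetRecpos(PGLOBAL, int recpos)
  {
  Fpos = recpos;       // hypothetical member: next record to read
  return false;        // false means success in this code base
  } // end of SetRecpos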
@@ -135,34 +332,34 @@ void TDB::Print(PGLOBAL, char *ps, uint)
/***********************************************************************/
TDBASE::TDBASE(PTABDEF tdp) : TDB(tdp)
{
- To_Def = tdp;
+//To_Def = tdp;
To_Link = NULL;
To_Key_Col = NULL;
To_Kindex = NULL;
To_Xdp = NULL;
- To_SetCols = NULL;
+//To_SetCols = NULL;
Ftype = RECFM_NAF;
- MaxSize = -1;
+//MaxSize = -1;
Knum = 0;
- Read_Only = (tdp) ? tdp->IsReadOnly() : false;
- m_data_charset= (tdp) ? tdp->data_charset() : NULL;
- csname = (tdp) ? tdp->csname : NULL;
+//Read_Only = (tdp) ? tdp->IsReadOnly() : false;
+//m_data_charset= (tdp) ? tdp->data_charset() : NULL;
+//csname = (tdp) ? tdp->csname : NULL;
} // end of TDBASE constructor
TDBASE::TDBASE(PTDBASE tdbp) : TDB(tdbp)
{
- To_Def = tdbp->To_Def;
+//To_Def = tdbp->To_Def;
To_Link = tdbp->To_Link;
To_Key_Col = tdbp->To_Key_Col;
To_Kindex = tdbp->To_Kindex;
To_Xdp = tdbp->To_Xdp;
- To_SetCols = tdbp->To_SetCols; // ???
+//To_SetCols = tdbp->To_SetCols; // ???
Ftype = tdbp->Ftype;
- MaxSize = tdbp->MaxSize;
+//MaxSize = tdbp->MaxSize;
Knum = tdbp->Knum;
- Read_Only = tdbp->Read_Only;
- m_data_charset= tdbp->m_data_charset;
- csname = tdbp->csname;
+//Read_Only = tdbp->Read_Only;
+//m_data_charset= tdbp->m_data_charset;
+//csname = tdbp->csname;
} // end of TDBASE copy constructor
/***********************************************************************/
@@ -173,6 +370,7 @@ PCATLG TDBASE::GetCat(void)
return (To_Def) ? To_Def->GetCat() : NULL;
} // end of GetCat
+#if 0
/***********************************************************************/
/* Return the pointer on the charset of this table. */
/***********************************************************************/
@@ -334,6 +532,7 @@ PCOL TDBASE::InsertSpcBlk(PGLOBAL g, PCOLDEF cdp)
return (colp);
} // end of InsertSpcBlk
+#endif // 0
/***********************************************************************/
/* ResetTableOpt: Wrong for this table type. */
@@ -362,6 +561,7 @@ void TDBASE::ResetKindex(PGLOBAL g, PKXBASE kxp)
To_Kindex = kxp;
} // end of ResetKindex
+#if 0
/***********************************************************************/
/* SetRecpos: Replace the table at the specified position. */
/***********************************************************************/
@@ -370,6 +570,7 @@ bool TDBASE::SetRecpos(PGLOBAL g, int)
strcpy(g->Message, MSG(SETRECPOS_NIY));
return true;
} // end of SetRecpos
+#endif // 0
/***********************************************************************/
/* Methods */
@@ -379,6 +580,7 @@ void TDBASE::PrintAM(FILE *f, char *m)
fprintf(f, "%s AM(%d): mode=%d\n", m, GetAmType(), Mode);
} // end of PrintAM
+#if 0
/***********************************************************************/
/* Marks DOS/MAP table columns used in internal joins. */
/* tdb2 is the top of tree or first tdb in chained tdb's and tdbp */
@@ -392,6 +594,7 @@ void TDBASE::MarkDB(PGLOBAL, PTDB tdb2)
htrc("DOS MarkDB: tdbp=%p tdb2=%p\n", this, tdb2);
} // end of MarkDB
+#endif // 0
/* ---------------------------TDBCAT class --------------------------- */
diff --git a/storage/connect/tabmac.cpp b/storage/connect/tabmac.cpp
index e6e2abb54e2..bbaba591540 100644
--- a/storage/connect/tabmac.cpp
+++ b/storage/connect/tabmac.cpp
@@ -12,7 +12,7 @@
#include "global.h"
#include "plgdbsem.h"
//#include "catalog.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "xtable.h"
#include "colblk.h"
#include "tabmac.h"
diff --git a/storage/connect/tabmac.h b/storage/connect/tabmac.h
index f9a66e82eaa..47565bb2541 100644
--- a/storage/connect/tabmac.h
+++ b/storage/connect/tabmac.h
@@ -52,7 +52,7 @@ class TDBMAC : public TDBASE {
//virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBMAC(g, this);}
// Methods
-//virtual PTDB CopyOne(PTABS t);
+//virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void) {return N;}
virtual int RowNumber(PGLOBAL g, bool b = false) {return N;}
diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp
index ea287558b44..78adde81d12 100644
--- a/storage/connect/tabmul.cpp
+++ b/storage/connect/tabmul.cpp
@@ -1,11 +1,11 @@
/************* TabMul C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABMUL */
/* ------------- */
-/* Version 1.7 */
+/* Version 1.8 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to PlugDB Software Development 2003 - 2015 */
+/* (C) Copyright to PlugDB Software Development 2003 - 2017 */
/* Author: Olivier BERTRAND */
/* */
/* WHAT THIS PROGRAM DOES: */
@@ -73,7 +73,7 @@
/***********************************************************************/
/* TABMUL constructors. */
/***********************************************************************/
-TDBMUL::TDBMUL(PTDBASE tdbp) : TDBASE(tdbp->GetDef())
+TDBMUL::TDBMUL(PTDB tdbp) : TDBASE(tdbp->GetDef())
{
Tdbp = tdbp;
Filenames = NULL;
@@ -94,22 +94,22 @@ TDBMUL::TDBMUL(PTDBMUL tdbp) : TDBASE(tdbp)
} // end of TDBMUL copy constructor
// Method
-PTDB TDBMUL::CopyOne(PTABS t)
+PTDB TDBMUL::Clone(PTABS t)
{
PTDBMUL tp;
PGLOBAL g = t->G; // Is this really useful ???
tp = new(g) TDBMUL(this);
- tp->Tdbp = (PTDBASE)Tdbp->CopyOne(t);
+ tp->Tdbp = Tdbp->Clone(t);
tp->Columns = tp->Tdbp->GetColumns();
return tp;
- } // end of CopyOne
+ } // end of Clone
PTDB TDBMUL::Duplicate(PGLOBAL g)
{
PTDBMUL tmup = new(g) TDBMUL(this);
- tmup->Tdbp = (PTDBASE)Tdbp->Duplicate(g);
+ tmup->Tdbp = Tdbp->Duplicate(g);
return tmup;
} // end of Duplicate
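TDBMUL now wraps a plain PTDB instead of a PTDBASE, which is what removes the casts in Clone and Duplicate above. A small sketch of building a multiple table around an existing single-file descriptor; the wrapper function is illustrative, not from the patch:

PTDB MakeMultiple(PGLOBAL g, PTDB inner)
  {
  TDBMUL *mulp = new(g) TDBMUL(inner);      // any PTDB is accepted now

  // The multiple table exposes the inner table's column list.
  mulp->SetColumns(inner->GetColumns());
  return mulp;
  } // end of MakeMultiple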
@@ -658,7 +658,7 @@ TDBDIR::TDBDIR(PTDBDIR tdbp) : TDBASE(tdbp)
} // end of TDBDIR copy constructor
// Method
-PTDB TDBDIR::CopyOne(PTABS t)
+PTDB TDBDIR::Clone(PTABS t)
{
PTDB tp;
PGLOBAL g = t->G; // Is this really useful ???
@@ -666,7 +666,7 @@ PTDB TDBDIR::CopyOne(PTABS t)
tp = new(g) TDBDIR(this);
tp->SetColumns(Columns);
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Initialize/get the components of the search file pattern. */
@@ -974,7 +974,7 @@ TDBSDR::TDBSDR(PTDBSDR tdbp) : TDBDIR(tdbp)
} // end of TDBSDR copy constructor
// Method
-PTDB TDBSDR::CopyOne(PTABS t)
+PTDB TDBSDR::Clone(PTABS t)
{
PTDB tp;
PGLOBAL g = t->G; // Is this really useful ???
@@ -982,7 +982,7 @@ PTDB TDBSDR::CopyOne(PTABS t)
tp = new(g) TDBSDR(this);
tp->SetColumns(Columns);
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* SDR GetMaxSize: returns the number of retrieved files. */
@@ -1251,7 +1251,7 @@ TDBDHR::TDBDHR(PTDBDHR tdbp) : TDBASE(tdbp)
} // end of TDBDHR copy constructor
// Method
-PTDB TDBDHR::CopyOne(PTABS t)
+PTDB TDBDHR::Clone(PTABS t)
{
PTDB tp;
PGLOBAL g = t->G; // Is this really useful ???
@@ -1259,7 +1259,7 @@ PTDB TDBDHR::CopyOne(PTABS t)
tp = new(g) TDBDHR(this);
tp->Columns = Columns;
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate DHR column description block. */
diff --git a/storage/connect/tabmul.h b/storage/connect/tabmul.h
index 433cc3a2ee3..51fa7f9000a 100644
--- a/storage/connect/tabmul.h
+++ b/storage/connect/tabmul.h
@@ -1,7 +1,7 @@
/*************** Tabmul H Declares Source Code File (.H) ***************/
-/* Name: TABMUL.H Version 1.4 */
+/* Name: TABMUL.H Version 1.5 */
/* */
-/* (C) Copyright to PlugDB Software Development 2003-2012 */
+/* (C) Copyright to PlugDB Software Development 2003-2017 */
/* Author: Olivier BERTRAND */
/* */
/* This file contains the TDBMUL and TDBDIR classes declares. */
@@ -28,7 +28,7 @@ class DllExport TDBMUL : public TDBASE {
//friend class MULCOL;
public:
// Constructor
- TDBMUL(PTDBASE tdbp);
+ TDBMUL(PTDB tdbp);
TDBMUL(PTDBMUL tdbp);
// Implementation
@@ -37,7 +37,7 @@ class DllExport TDBMUL : public TDBASE {
// Methods
virtual void ResetDB(void);
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual bool IsSame(PTDB tp) {return tp == (PTDB)Tdbp;}
virtual PSZ GetFile(PGLOBAL g) {return Tdbp->GetFile(g);}
virtual int GetRecpos(void) {return 0;}
@@ -61,7 +61,7 @@ class DllExport TDBMUL : public TDBASE {
protected:
// Members
- TDBASE *Tdbp; // Points to a (file) table class
+ PTDB Tdbp; // Points to a (file) table class
char* *Filenames; // Points to file names
int Rows; // Total rows of already read files
int Mul; // Type of multiple file list
@@ -112,7 +112,7 @@ class TDBDIR : public TDBASE {
{return (PTDB)new(g) TDBDIR(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void) {return iFile;}
// Database routines
@@ -134,7 +134,7 @@ class TDBDIR : public TDBASE {
int iFile; // Index of currently retrieved file
#if defined(__WIN__)
_finddata_t FileData; // Find data structure
- int Hsearch; // Search handle
+ intptr_t Hsearch; // Search handle
char Drive[_MAX_DRIVE]; // Drive name
#else // !__WIN__
struct stat Fileinfo; // File info structure
@@ -168,7 +168,7 @@ class TDBSDR : public TDBDIR {
{return (PTDB)new(g) TDBSDR(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
// Database routines
virtual int GetMaxSize(PGLOBAL g);
@@ -184,7 +184,7 @@ class TDBSDR : public TDBDIR {
struct _Sub_Dir *Next;
struct _Sub_Dir *Prev;
#if defined(__WIN__)
- int H; // Search handle
+ intptr_t H; // Search handle
#else // !__WIN__
DIR *D;
#endif // !__WIN__
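The Hsearch and H members switch from int to intptr_t because the Win32 _findfirst family returns a pointer-sized handle, which an int truncates on 64-bit Windows. A self-contained sketch of the kind of loop these handles serve, independent of the TDBDIR code:

#if defined(__WIN__)
#include <io.h>       // _findfirst, _findnext, _findclose

int CountFiles(const char *pattern)
  {
  _finddata_t fd;
  intptr_t    h = _findfirst(pattern, &fd);  // handle is pointer-sized
  int         n = 0;

  if (h == -1)
    return 0;                                // nothing matched

  do
    n++;
  while (_findnext(h, &fd) == 0);

  _findclose(h);
  return n;
  } // end of CountFiles
#endif // __WIN__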
diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp
index 98a476bf94f..1a715819fc8 100644
--- a/storage/connect/tabmysql.cpp
+++ b/storage/connect/tabmysql.cpp
@@ -1,11 +1,11 @@
/************* TabMySQL C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: TABMYSQL */
/* ------------- */
-/* Version 1.9 */
+/* Version 2.0 */
/* */
/* AUTHOR: */
/* ------- */
-/* Olivier BERTRAND 2007-2015 */
+/* Olivier BERTRAND 2007-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -54,9 +54,10 @@
#include "global.h"
#include "plgdbsem.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabcol.h"
#include "colblk.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "tabmysql.h"
#include "valblk.h"
#include "tabutil.h"
@@ -84,16 +85,16 @@ MYSQLDEF::MYSQLDEF(void)
{
Pseudo = 2; // SERVID is Ok but not ROWID
Hostname = NULL;
- Database = NULL;
- Tabname = NULL;
- Srcdef = NULL;
- Username = NULL;
- Password = NULL;
+//Tabschema = NULL;
+//Tabname = NULL;
+//Srcdef = NULL;
+//Username = NULL;
+//Password = NULL;
Portnumber = 0;
Isview = false;
Bind = false;
Delayed = false;
- Xsrc = false;
+//Xsrc = false;
Huge = false;
} // end of MYSQLDEF constructor
@@ -128,7 +129,7 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name)
// TODO: We need to examine which of these can really be NULL
Hostname = PlugDup(g, server->host);
- Database = PlugDup(g, server->db);
+ Tabschema = PlugDup(g, server->db);
Username = PlugDup(g, server->username);
Password = PlugDup(g, server->password);
Portnumber = (server->port) ? server->port : GetDefaultPort();
@@ -200,7 +201,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
Tabname = (b) ? GetStringCatInfo(g, "Tabname", Name) : NULL;
if (trace)
- htrc("server: %s Tabname: %s", url, Tabname);
+ htrc("server: %s TableName: %s", url, Tabname);
Server = url;
return GetServerInfo(g, url);
@@ -253,10 +254,10 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
return true;
} // endif
- if ((Database = strchr(Hostname, '/'))) {
- *Database++ = 0;
+ if ((Tabschema = strchr(Hostname, '/'))) {
+ *Tabschema++ = 0;
- if ((Tabname = strchr(Database, '/'))) {
+ if ((Tabname = strchr(Tabschema, '/'))) {
*Tabname++ = 0;
// Make sure there's not an extra /
@@ -265,7 +266,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
return true;
} // endif /
- } // endif Tabname
+ } // endif TableName
} // endif database
@@ -283,8 +284,8 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b)
if (Hostname[0] == 0)
Hostname = (b) ? GetStringCatInfo(g, "Host", "localhost") : NULL;
- if (!Database || !*Database)
- Database = (b) ? GetStringCatInfo(g, "Database", "*") : NULL;
+ if (!Tabschema || !*Tabschema)
+ Tabschema = (b) ? GetStringCatInfo(g, "Database", "*") : NULL;
if (!Tabname || !*Tabname)
Tabname = (b) ? GetStringCatInfo(g, "Tabname", Name) : NULL;
@@ -320,7 +321,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
if (!url || !*url) {
// Not using the connection URL
Hostname = GetStringCatInfo(g, "Host", "localhost");
- Database = GetStringCatInfo(g, "Database", "*");
+ Tabschema = GetStringCatInfo(g, "Database", "*");
Tabname = GetStringCatInfo(g, "Name", Name); // Deprecated
Tabname = GetStringCatInfo(g, "Tabname", Tabname);
Username = GetStringCatInfo(g, "User", "*");
@@ -334,7 +335,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
Delayed = !!GetIntCatInfo("Delayed", 0);
} else {
// MYSQL access from a PROXY table
- Database = GetStringCatInfo(g, "Database", Schema ? Schema : PlugDup(g, "*"));
+ Tabschema = GetStringCatInfo(g, "Database", Tabschema ? Tabschema : PlugDup(g, "*"));
Isview = GetBoolCatInfo("View", false);
// We must get other connection parms from the calling table
@@ -348,12 +349,12 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
Portnumber = GetIntCatInfo("Port", GetDefaultPort());
Server = Hostname;
} else {
- char *locdb = Database;
+ char *locdb = Tabschema;
if (ParseURL(g, url))
return true;
- Database = locdb;
+ Tabschema = locdb;
} // endif url
Tabname = Name;
@@ -362,7 +363,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) {
Read_Only = true;
Isview = true;
- } else if (CheckSelf(g, Hc->GetTable()->s, Hostname, Database,
+ } else if (CheckSelf(g, Hc->GetTable()->s, Hostname, Tabschema,
Tabname, Srcdef, Portnumber))
return true;
@@ -372,7 +373,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
// Specific for command executing tables
Xsrc = GetBoolCatInfo("Execsrc", false);
- Mxr = GetIntCatInfo("Maxerr", 0);
+ Maxerr = GetIntCatInfo("Maxerr", 0);
Huge = GetBoolCatInfo("Huge", false);
return false;
} // end of DefineAM
@@ -396,17 +397,17 @@ PTDB MYSQLDEF::GetTable(PGLOBAL g, MODE)
/***********************************************************************/
/* Implementation of the TDBMYSQL class. */
/***********************************************************************/
-TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBASE(tdp)
+TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBEXT(tdp)
{
if (tdp) {
Host = tdp->Hostname;
- Database = tdp->Database;
- Tabname = tdp->Tabname;
- Srcdef = tdp->Srcdef;
- User = tdp->Username;
- Pwd = tdp->Password;
+// Schema = tdp->Tabschema;
+// TableName = tdp->Tabname;
+// Srcdef = tdp->Srcdef;
+// User = tdp->Username;
+// Pwd = tdp->Password;
Server = tdp->Server;
- Qrystr = tdp->Qrystr;
+// Qrystr = tdp->Qrystr;
Quoted = MY_MAX(0, tdp->Quoted);
Port = tdp->Portnumber;
Isview = tdp->Isview;
@@ -415,14 +416,14 @@ TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBASE(tdp)
Myc.m_Use = tdp->Huge;
} else {
Host = NULL;
- Database = NULL;
- Tabname = NULL;
- Srcdef = NULL;
- User = NULL;
- Pwd = NULL;
+// Schema = NULL;
+// TableName = NULL;
+// Srcdef = NULL;
+// User = NULL;
+// Pwd = NULL;
Server = NULL;
- Qrystr = NULL;
- Quoted = 0;
+// Qrystr = NULL;
+// Quoted = 0;
Port = 0;
Isview = false;
Prep = false;
@@ -430,39 +431,40 @@ TDBMYSQL::TDBMYSQL(PMYDEF tdp) : TDBASE(tdp)
} // endif tdp
Bind = NULL;
- Query = NULL;
+//Query = NULL;
Fetched = false;
m_Rc = RC_FX;
- AftRows = 0;
+//AftRows = 0;
N = -1;
- Nparm = 0;
+//Nparm = 0;
} // end of TDBMYSQL constructor
-TDBMYSQL::TDBMYSQL(PTDBMY tdbp) : TDBASE(tdbp)
+TDBMYSQL::TDBMYSQL(PTDBMY tdbp) : TDBEXT(tdbp)
{
Host = tdbp->Host;
- Database = tdbp->Database;
- Tabname = tdbp->Tabname;
- Srcdef = tdbp->Srcdef;
- User = tdbp->User;
- Pwd = tdbp->Pwd;
- Qrystr = tdbp->Qrystr;
- Quoted = tdbp->Quoted;
+//Schema = tdbp->Schema;
+//TableName = tdbp->TableName;
+//Srcdef = tdbp->Srcdef;
+//User = tdbp->User;
+//Pwd = tdbp->Pwd;
+//Qrystr = tdbp->Qrystr;
+//Quoted = tdbp->Quoted;
+ Server = tdbp->Server;
Port = tdbp->Port;
Isview = tdbp->Isview;
Prep = tdbp->Prep;
Delayed = tdbp->Delayed;
Bind = NULL;
- Query = tdbp->Query;
+//Query = tdbp->Query;
Fetched = tdbp->Fetched;
m_Rc = tdbp->m_Rc;
- AftRows = tdbp->AftRows;
+//AftRows = tdbp->AftRows;
N = tdbp->N;
- Nparm = tdbp->Nparm;
+//Nparm = tdbp->Nparm;
} // end of TDBMYSQL copy constructor
-// Is this really useful ???
-PTDB TDBMYSQL::CopyOne(PTABS t)
+// Is this really useful ??? --> Yes for UPDATE
+PTDB TDBMYSQL::Clone(PTABS t)
{
PTDB tp;
PCOL cp1, cp2;
@@ -477,7 +479,7 @@ PTDB TDBMYSQL::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate MYSQL column description block. */
@@ -504,10 +506,18 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx)
if (Query)
return false; // already done
- if (Srcdef) {
- Query = new(g)STRING(g, 0, Srcdef);
- return false;
- } // endif Srcdef
+ if (Srcdef) {
+ if (strstr(Srcdef, "%s")) {
+ char *fil;
+
+ fil = (To_CondFil) ? To_CondFil->Body : PlugDup(g, "1=1");
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil));
+ } else
+ Query = new(g)STRING(g, 0, Srcdef);
+
+ return false;
+ } // endif Srcdef
// Allocate the string used to contain Query
Query = new(g) STRING(g, 1023, "SELECT ");
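MakeSelect (and TDBODBC::MakeSQL later in this patch) now treat a %s inside SRCDEF as a placeholder for the pushed-down WHERE condition: the condition filter body is substituted when one exists, otherwise the neutral 1=1. The same logic isolated from the TDB classes; BuildSrcQuery is a hypothetical helper:

PSTRG BuildSrcQuery(PGLOBAL g, char *srcdef, PCONDFIL condfil)
  {
  PSTRG qry;

  if (strstr(srcdef, "%s")) {
    char *fil = (condfil) ? condfil->Body : PlugDup(g, "1=1");

    qry = new(g) STRING(g, strlen(srcdef) + strlen(fil));
    qry->SetLength(sprintf(qry->GetStr(), srcdef, fil));
  } else
    qry = new(g) STRING(g, 0, srcdef);

  return qry;
  } // end of BuildSrcQuery

For example, a table defined with SRCDEF='select id, qty from t1 where %s' lets the remote server evaluate the pushed-down filter instead of CONNECT fetching every row.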
@@ -540,7 +550,7 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx)
oom |= Query->Append(" FROM ");
oom |= Query->Append(tk);
- oom |= Query->Append(Tabname);
+ oom |= Query->Append(TableName);
oom |= Query->Append(tk);
len = Query->GetLength();
@@ -608,7 +618,7 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g)
} // endif colp
// Below 40 is enough to contain the fixed part of the query
- len += (strlen(Tabname) + 40);
+ len += (strlen(TableName) + 40);
Query = new(g) STRING(g, len);
if (Delayed)
@@ -617,7 +627,7 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g)
oom = Query->Set("INSERT INTO ");
oom |= Query->Append(tk);
- oom |= Query->Append(Tabname);
+ oom |= Query->Append(TableName);
oom |= Query->Append("` (");
for (colp = Columns; colp; colp = colp->GetNext()) {
@@ -653,11 +663,11 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g)
/* MakeCommand: make the Update or Delete statement to send to the */
/* MySQL server. Limited to remote values and filtering. */
/***********************************************************************/
-int TDBMYSQL::MakeCommand(PGLOBAL g)
+bool TDBMYSQL::MakeCommand(PGLOBAL g)
{
Query = new(g) STRING(g, strlen(Qrystr) + 64);
- if (Quoted > 0 || stricmp(Name, Tabname)) {
+ if (Quoted > 0 || stricmp(Name, TableName)) {
char *p, *qrystr, name[68];
bool qtd = Quoted > 0;
@@ -678,29 +688,29 @@ int TDBMYSQL::MakeCommand(PGLOBAL g)
if (qtd && *(p-1) == ' ') {
oom |= Query->Append('`');
- oom |= Query->Append(Tabname);
+ oom |= Query->Append(TableName);
oom |= Query->Append('`');
} else
- oom |= Query->Append(Tabname);
+ oom |= Query->Append(TableName);
oom |= Query->Append(Qrystr + (p - qrystr) + strlen(name));
if (oom) {
strcpy(g->Message, "MakeCommand: Out of memory");
- return RC_FX;
+ return true;
} else
strlwr(strcpy(qrystr, Query->GetStr()));
} else {
sprintf(g->Message, "Cannot use this %s command",
(Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
- return RC_FX;
+ return true;
} // endif p
} else
(void)Query->Set(Qrystr);
- return RC_OK;
+ return false;
} // end of MakeCommand
#if 0
@@ -727,7 +737,7 @@ int TDBMYSQL::MakeUpdate(PGLOBAL g)
} // endif sscanf
assert(!stricmp(cmd, "update"));
- strcat(strcat(strcat(strcpy(Query, "UPDATE "), qc), Tabname), qc);
+ strcat(strcat(strcat(strcpy(Query, "UPDATE "), qc), TableName), qc);
strcat(Query, end);
return RC_OK;
} // end of MakeUpdate
@@ -754,7 +764,7 @@ int TDBMYSQL::MakeDelete(PGLOBAL g)
} // endif sscanf
assert(!stricmp(cmd, "delete") && !stricmp(from, "from"));
- strcat(strcat(strcat(strcpy(Query, "DELETE FROM "), qc), Tabname), qc);
+ strcat(strcat(strcat(strcpy(Query, "DELETE FROM "), qc), TableName), qc);
if (*end)
strcat(Query, end);
@@ -776,15 +786,15 @@ int TDBMYSQL::Cardinality(PGLOBAL g)
char query[96];
MYSQLC myc;
- if (myc.Open(g, Host, Database, User, Pwd, Port, csname))
+ if (myc.Open(g, Host, Schema, User, Pwd, Port, csname))
return -1;
strcpy(query, "SELECT COUNT(*) FROM ");
if (Quoted > 0)
- strcat(strcat(strcat(query, "`"), Tabname), "`");
+ strcat(strcat(strcat(query, "`"), TableName), "`");
else
- strcat(query, Tabname);
+ strcat(query, TableName);
Cardinal = myc.GetTableSize(g, query);
myc.Close();
@@ -794,6 +804,7 @@ int TDBMYSQL::Cardinality(PGLOBAL g)
return Cardinal;
} // end of Cardinality
+#if 0
/***********************************************************************/
/* MYSQL GetMaxSize: returns the maximum number of rows in the table. */
/***********************************************************************/
@@ -812,6 +823,7 @@ int TDBMYSQL::GetMaxSize(PGLOBAL g)
return MaxSize;
} // end of GetMaxSize
+#endif // 0
/***********************************************************************/
/* This a fake routine as ROWID does not exist in MySQL. */
@@ -872,7 +884,7 @@ bool TDBMYSQL::OpenDB(PGLOBAL g)
/* servers allowing concurency in getting results ??? */
/*********************************************************************/
if (!Myc.Connected()) {
- if (Myc.Open(g, Host, Database, User, Pwd, Port, csname))
+ if (Myc.Open(g, Host, Schema, User, Pwd, Port, csname))
return true;
} // endif Connected
@@ -931,14 +943,14 @@ bool TDBMYSQL::OpenDB(PGLOBAL g)
char cmd[64];
int w;
- sprintf(cmd, "ALTER TABLE `%s` DISABLE KEYS", Tabname);
+ sprintf(cmd, "ALTER TABLE `%s` DISABLE KEYS", TableName);
m_Rc = Myc.ExecSQL(g, cmd, &w); // may fail for some engines
} // endif m_Rc
} else
// m_Rc = (Mode == MODE_DELETE) ? MakeDelete(g) : MakeUpdate(g);
- m_Rc = MakeCommand(g);
+ m_Rc = (MakeCommand(g)) ? RC_FX : RC_OK;
if (m_Rc == RC_FX) {
Myc.Close();
@@ -1030,7 +1042,7 @@ int TDBMYSQL::SendCommand(PGLOBAL g)
if (Myc.ExecSQLcmd(g, Query->GetStr(), &w) == RC_NF) {
AftRows = Myc.m_Afrw;
- sprintf(g->Message, "%s: %d affected rows", Tabname, AftRows);
+ sprintf(g->Message, "%s: %d affected rows", TableName, AftRows);
PushWarning(g, this, 0); // 0 means a Note
if (trace)
@@ -1039,7 +1051,7 @@ int TDBMYSQL::SendCommand(PGLOBAL g)
if (w && Myc.ExecSQL(g, "SHOW WARNINGS") == RC_OK) {
// We got warnings from the remote server
while (Myc.Fetch(g, -1) == RC_OK) {
- sprintf(g->Message, "%s: (%s) %s", Tabname,
+ sprintf(g->Message, "%s: (%s) %s", TableName,
Myc.GetCharField(1), Myc.GetCharField(2));
PushWarning(g, this);
} // endwhile Fetch
@@ -1116,8 +1128,7 @@ int TDBMYSQL::ReadDB(PGLOBAL g)
int rc;
if (trace > 1)
- htrc("MySQL ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n",
- GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex);
+ htrc("MySQL ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode);
if (Mode == MODE_UPDATE || Mode == MODE_DELETE)
return SendCommand(g);
@@ -1205,7 +1216,7 @@ void TDBMYSQL::CloseDB(PGLOBAL g)
PDBUSER dup = PlgGetUser(g);
dup->Step = "Enabling indexes";
- sprintf(cmd, "ALTER TABLE `%s` ENABLE KEYS", Tabname);
+ sprintf(cmd, "ALTER TABLE `%s` ENABLE KEYS", TableName);
Myc.m_Rows = -1; // To execute the query
m_Rc = Myc.ExecSQL(g, cmd, &w); // May fail for some engines
} // endif m_Rc
@@ -1463,7 +1474,7 @@ TDBMYEXC::TDBMYEXC(PMYDEF tdp) : TDBMYSQL(tdp)
Havew = false;
Isw = false;
Warnings = 0;
- Mxr = tdp->Mxr;
+ Mxr = tdp->Maxerr;
Nerr = 0;
} // end of TDBMYEXC constructor
@@ -1479,7 +1490,7 @@ TDBMYEXC::TDBMYEXC(PTDBMYX tdbp) : TDBMYSQL(tdbp)
} // end of TDBMYEXC copy constructor
// Is this really useful ???
-PTDB TDBMYEXC::CopyOne(PTABS t)
+PTDB TDBMYEXC::Clone(PTABS t)
{
PTDB tp;
PCOL cp1, cp2;
@@ -1494,7 +1505,7 @@ PTDB TDBMYEXC::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate MYSQL column description block. */
@@ -1565,7 +1576,7 @@ bool TDBMYEXC::OpenDB(PGLOBAL g)
/* servers allowing concurency in getting results ??? */
/*********************************************************************/
if (!Myc.Connected())
- if (Myc.Open(g, Host, Database, User, Pwd, Port))
+ if (Myc.Open(g, Host, Schema, User, Pwd, Port))
return true;
Use = USE_OPEN; // Do it now in case we are recursively called
@@ -1728,7 +1739,7 @@ void MYXCOL::WriteColumn(PGLOBAL)
TDBMCL::TDBMCL(PMYDEF tdp) : TDBCAT(tdp)
{
Host = tdp->Hostname;
- Db = tdp->Database;
+ Db = tdp->Tabschema;
Tab = tdp->Tabname;
User = tdp->Username;
Pwd = tdp->Password;
diff --git a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h
index edb15b5cca6..050fa59259b 100644
--- a/storage/connect/tabmysql.h
+++ b/storage/connect/tabmysql.h
@@ -1,4 +1,4 @@
-// TDBMYSQL.H Olivier Bertrand 2007-2014
+// TDBMYSQL.H Olivier Bertrand 2007-2017
#include "myconn.h" // MySQL connection declares
typedef class MYSQLDEF *PMYDEF;
@@ -18,7 +18,7 @@ typedef class MYSQLC *PMYC;
/***********************************************************************/
/* MYSQL table. */
/***********************************************************************/
-class MYSQLDEF : public TABDEF {/* Logical table description */
+class MYSQLDEF : public EXTDEF {/* Logical table description */
friend class TDBMYSQL;
friend class TDBMYEXC;
friend class TDBMCL;
@@ -27,19 +27,18 @@ class MYSQLDEF : public TABDEF {/* Logical table description */
// Constructor
MYSQLDEF(void);
-
// Implementation
virtual const char *GetType(void) {return "MYSQL";}
inline PSZ GetHostname(void) {return Hostname;};
- inline PSZ GetDatabase(void) {return Database;};
- inline PSZ GetTabname(void) {return Tabname;}
- inline PSZ GetSrcdef(void) {return Srcdef;}
- inline PSZ GetUsername(void) {return Username;};
- inline PSZ GetPassword(void) {return Password;};
+//inline PSZ GetDatabase(void) {return Tabschema;};
+//inline PSZ GetTabname(void) {return Tabname;}
+//inline PSZ GetSrcdef(void) {return Srcdef;}
+//inline PSZ GetUsername(void) {return Username;};
+//inline PSZ GetPassword(void) {return Password;};
inline int GetPortnumber(void) {return Portnumber;}
// Methods
- virtual int Indexable(void) {return 2;}
+//virtual int Indexable(void) {return 2;}
virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
virtual PTDB GetTable(PGLOBAL g, MODE m);
bool ParseURL(PGLOBAL g, char *url, bool b = true);
@@ -48,27 +47,27 @@ class MYSQLDEF : public TABDEF {/* Logical table description */
protected:
// Members
PSZ Hostname; /* Host machine to use */
- PSZ Database; /* Database to be used by server */
- PSZ Tabname; /* External table name */
- PSZ Srcdef; /* The source table SQL definition */
- PSZ Username; /* User logon name */
- PSZ Password; /* Password logon info */
+//PSZ Tabschema; /* Database to be used by server */
+//PSZ Tabname; /* External table name */
+//PSZ Srcdef; /* The source table SQL definition */
+//PSZ Username; /* User logon name */
+//PSZ Password; /* Password logon info */
PSZ Server; /* PServerID */
- PSZ Qrystr; /* The original query */
+//PSZ Qrystr; /* The original query */
int Portnumber; /* MySQL port number (0 = default) */
- int Mxr; /* Maxerr for an Exec table */
- int Quoted; /* Identifier quoting level */
+//int Maxerr; /* Maxerr for an Exec table */
+//int Quoted; /* Identifier quoting level */
bool Isview; /* true if this table is a MySQL view */
bool Bind; /* Use prepared statement on insert */
bool Delayed; /* Delayed insert */
- bool Xsrc; /* Execution type */
+//bool Xsrc; /* Execution type */
bool Huge; /* True for big table */
}; // end of MYSQLDEF
/***********************************************************************/
/* This is the class declaration for the MYSQL table. */
/***********************************************************************/
-class TDBMYSQL : public TDBASE {
+class TDBMYSQL : public TDBEXT {
friend class MYSQLCOL;
public:
// Constructor
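The commented-out members above are not deleted logic: they moved into the new EXTDEF / TDBEXT base classes declared in tabext.h (now included by tabmysql.cpp earlier in this patch), which MYSQLDEF and TDBMYSQL derive from. A rough sketch of the resulting hierarchy, reconstructed only from the commented-out lines visible in this diff; the exact declarations live in tabext.h and are not shown here:

// Sketch only: member list taken from the commented-out lines above,
// base classes of EXTDEF/TDBEXT are assumptions.
class EXTDEF : public TABDEF {        // shared external-table definition
 protected:
  PSZ Tabschema, Tabname, Srcdef, Username, Password, Qrystr;
  int Quoted, Maxerr;
  // ... plus the catalog/timeout/memory options the ODBC hunks hand over
}; // end of EXTDEF

class TDBEXT : public TDB {           // shared external-table descriptor
 protected:
  PSTRG Query;                        // constructed SQL statement
  char *Schema, *TableName, *Srcdef, *User, *Pwd, *Qrystr;
  int   Quoted, AftRows, Nparm;
}; // end of TDBEXT

// MYSQLDEF : public EXTDEF and TDBMYSQL : public TDBEXT then keep only
// the MySQL-specific members (Hostname, Port, Bind, Delayed, ...).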
@@ -80,7 +79,7 @@ class TDBMYSQL : public TDBASE {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBMYSQL(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
//virtual int GetAffectedRows(void) {return AftRows;}
virtual int GetRecpos(void) {return N;}
virtual int GetProgMax(PGLOBAL g);
@@ -88,12 +87,12 @@ class TDBMYSQL : public TDBASE {
virtual int RowNumber(PGLOBAL g, bool b = false);
virtual bool IsView(void) {return Isview;}
virtual PSZ GetServer(void) {return Server;}
- void SetDatabase(LPCSTR db) {Database = (char*)db;}
+ void SetDatabase(LPCSTR db) {Schema = (char*)db;}
- // Database routines
+ // Schema routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual int Cardinality(PGLOBAL g);
- virtual int GetMaxSize(PGLOBAL g);
+//virtual int GetMaxSize(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -111,7 +110,7 @@ class TDBMYSQL : public TDBASE {
bool MakeSelect(PGLOBAL g, bool mx);
bool MakeInsert(PGLOBAL g);
int BindColumns(PGLOBAL g);
- int MakeCommand(PGLOBAL g);
+ virtual bool MakeCommand(PGLOBAL g);
//int MakeUpdate(PGLOBAL g);
//int MakeDelete(PGLOBAL g);
int SendCommand(PGLOBAL g);
@@ -119,25 +118,25 @@ class TDBMYSQL : public TDBASE {
// Members
MYSQLC Myc; // MySQL connection class
MYSQL_BIND *Bind; // To the MySQL bind structure array
- PSTRG Query; // Constructed SQL query
+//PSTRG Query; // Constructed SQL query
char *Host; // Host machine to use
- char *User; // User logon info
- char *Pwd; // Password logon info
- char *Database; // Database to be used by server
- char *Tabname; // External table name
- char *Srcdef; // The source table SQL definition
+//char *User; // User logon info
+//char *Pwd; // Password logon info
+//char *Schema; // Database to be used by server
+//char *TableName; // External table name
+//char *Srcdef; // The source table SQL definition
char *Server; // The server ID
- char *Qrystr; // The original query
+//char *Qrystr; // The original query
bool Fetched; // True when fetch was done
bool Isview; // True if this table is a MySQL view
bool Prep; // Use prepared statement on insert
bool Delayed; // Use delayed insert
int m_Rc; // Return code from command
- int AftRows; // The number of affected rows
+//int AftRows; // The number of affected rows
int N; // The current table index
int Port; // MySQL port number (0 = default)
- int Nparm; // The number of statement parameters
- int Quoted; // The identifier quoting level
+//int Nparm; // The number of statement parameters
+//int Quoted; // The identifier quoting level
}; // end of class TDBMYSQL
/***********************************************************************/
@@ -162,9 +161,6 @@ class MYSQLCOL : public COLBLK {
bool FindRank(PGLOBAL g);
protected:
- // Default constructor not to be used
- MYSQLCOL(void) {}
-
// Members
MYSQL_BIND *Bind; // This column bind structure pointer
PVAL To_Val; // To value used for Update/Insert
@@ -187,7 +183,7 @@ class TDBMYEXC : public TDBMYSQL {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBMYEXC(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual bool IsView(void) {return Isview;}
// Database routines
@@ -228,9 +224,6 @@ class MYXCOL : public MYSQLCOL {
virtual void WriteColumn(PGLOBAL g);
protected:
- // Default constructor not to be used
- MYXCOL(void) {}
-
// Members
char *Buffer; // To get returned message
int Flag; // Column content desc
diff --git a/storage/connect/taboccur.cpp b/storage/connect/taboccur.cpp
index 07e260154e0..07272d1b298 100644
--- a/storage/connect/taboccur.cpp
+++ b/storage/connect/taboccur.cpp
@@ -1,7 +1,7 @@
/************ TabOccur CPP Declares Source Code File (.CPP) ************/
-/* Name: TABOCCUR.CPP Version 1.1 */
+/* Name: TABOCCUR.CPP Version 1.2 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2013 - 2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2013 - 2017 */
/* */
/* OCCUR: Table that provides a view of a source table where the */
/* contain of several columns of the source table is placed in only */
@@ -39,12 +39,13 @@
/***********************************************************************/
#include "global.h"
#include "plgdbsem.h"
-#include "reldef.h"
+#include "xtable.h"
+#include "tabext.h"
+//#include "reldef.h"
#include "filamtxt.h"
#include "tabdos.h"
#include "tabcol.h"
#include "taboccur.h"
-#include "xtable.h"
#include "tabmysql.h"
#include "ha_connect.h"
diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp
index f3ffc99ac15..488acdd330d 100644
--- a/storage/connect/tabodbc.cpp
+++ b/storage/connect/tabodbc.cpp
@@ -67,10 +67,11 @@
#include "plgdbsem.h"
#include "mycat.h"
#include "xtable.h"
+#include "tabext.h"
#include "odbccat.h"
#include "tabodbc.h"
#include "tabmul.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "tabcol.h"
#include "valblk.h"
#include "ha_connect.h"
@@ -95,10 +96,9 @@ bool ExactInfo(void);
/***********************************************************************/
ODBCDEF::ODBCDEF(void)
{
- Connect = Tabname = Tabschema = Username = Password = NULL;
- Tabcat = Colpat = Srcdef = Qchar = Qrystr = Sep = NULL;
- Catver = Options = Cto = Qto = Quoted = Maxerr = Maxres = Memory = 0;
- Scrollable = Xsrc = UseCnc = false;
+ Connect = NULL;
+ Catver = 0;
+ UseCnc = false;
} // end of ODBCDEF constructor
/***********************************************************************/
@@ -113,47 +113,50 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
return true;
} // endif Connect
- Tabname = GetStringCatInfo(g, "Name",
- (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
- Tabname = GetStringCatInfo(g, "Tabname", Tabname);
- Tabschema = GetStringCatInfo(g, "Dbname", NULL);
- Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
- Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
- Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
- Username = GetStringCatInfo(g, "User", NULL);
- Password = GetStringCatInfo(g, "Password", NULL);
-
- if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
- Read_Only = true;
-
- Qrystr = GetStringCatInfo(g, "Query_String", "?");
- Sep = GetStringCatInfo(g, "Separator", NULL);
+ if (EXTDEF::DefineAM(g, am, poff))
+ return true;
+
+ // Tabname = GetStringCatInfo(g, "Name",
+ // (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
+ // Tabname = GetStringCatInfo(g, "Tabname", Tabname);
+ // Tabschema = GetStringCatInfo(g, "Dbname", NULL);
+ // Tabschema = GetStringCatInfo(g, "Schema", Tabschema);
+ // Tabcat = GetStringCatInfo(g, "Qualifier", NULL);
+ // Tabcat = GetStringCatInfo(g, "Catalog", Tabcat);
+ //Username = GetStringCatInfo(g, "User", NULL);
+ // Password = GetStringCatInfo(g, "Password", NULL);
+
+ // if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL)))
+ // Read_Only = true;
+
+ // Qrystr = GetStringCatInfo(g, "Query_String", "?");
+ // Sep = GetStringCatInfo(g, "Separator", NULL);
Catver = GetIntCatInfo("Catver", 2);
- Xsrc = GetBoolCatInfo("Execsrc", FALSE);
- Maxerr = GetIntCatInfo("Maxerr", 0);
- Maxres = GetIntCatInfo("Maxres", 0);
- Quoted = GetIntCatInfo("Quoted", 0);
+ //Xsrc = GetBoolCatInfo("Execsrc", FALSE);
+ //Maxerr = GetIntCatInfo("Maxerr", 0);
+ //Maxres = GetIntCatInfo("Maxres", 0);
+ //Quoted = GetIntCatInfo("Quoted", 0);
Options = ODBConn::noOdbcDialog;
//Options = ODBConn::noOdbcDialog | ODBConn::useCursorLib;
Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT);
Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT);
- if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt)
- Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch
+ //if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt)
+ // Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch
- if (Catfunc == FNC_COL)
- Colpat = GetStringCatInfo(g, "Colpat", NULL);
+ //if (Catfunc == FNC_COL)
+ // Colpat = GetStringCatInfo(g, "Colpat", NULL);
- if (Catfunc == FNC_TABLE)
- Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
+ //if (Catfunc == FNC_TABLE)
+ // Tabtyp = GetStringCatInfo(g, "Tabtype", NULL);
UseCnc = GetBoolCatInfo("UseDSN", false);
// Memory was Boolean, it is now integer
- if (!(Memory = GetIntCatInfo("Memory", 0)))
- Memory = GetBoolCatInfo("Memory", false) ? 1 : 0;
+ //if (!(Memory = GetIntCatInfo("Memory", 0)))
+ // Memory = GetBoolCatInfo("Memory", false) ? 1 : 0;
- Pseudo = 2; // FILID is Ok but not ROWID
+ //Pseudo = 2; // FILID is Ok but not ROWID
return false;
} // end of DefineAM
@@ -162,7 +165,7 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
/***********************************************************************/
PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m)
{
- PTDBASE tdbp = NULL;
+ PTDB tdbp = NULL;
/*********************************************************************/
/* Allocate a TDB of the proper type. */
@@ -200,103 +203,103 @@ PTDB ODBCDEF::GetTable(PGLOBAL g, MODE m)
/***********************************************************************/
/* Implementation of the TDBODBC class. */
/***********************************************************************/
-TDBODBC::TDBODBC(PODEF tdp) : TDBASE(tdp)
+TDBODBC::TDBODBC(PODEF tdp) : TDBEXT(tdp)
{
Ocp = NULL;
Cnp = NULL;
if (tdp) {
Connect = tdp->Connect;
- TableName = tdp->Tabname;
- Schema = tdp->Tabschema;
+ //TableName = tdp->Tabname;
+ //Schema = tdp->Tabschema;
Ops.User = tdp->Username;
Ops.Pwd = tdp->Password;
- Catalog = tdp->Tabcat;
- Srcdef = tdp->Srcdef;
- Qrystr = tdp->Qrystr;
- Sep = tdp->GetSep();
- Options = tdp->Options;
+ //Catalog = tdp->Tabcat;
+ //Srcdef = tdp->Srcdef;
+ //Qrystr = tdp->Qrystr;
+ //Sep = tdp->GetSep();
+ //Options = tdp->Options;
Ops.Cto = tdp->Cto;
Ops.Qto = tdp->Qto;
- Quoted = MY_MAX(0, tdp->GetQuoted());
- Rows = tdp->GetElemt();
+ //Quoted = MY_MAX(0, tdp->GetQuoted());
+ //Rows = tdp->GetElemt();
Catver = tdp->Catver;
- Memory = tdp->Memory;
- Scrollable = tdp->Scrollable;
+ //Memory = tdp->Memory;
+ //Scrollable = tdp->Scrollable;
Ops.UseCnc = tdp->UseCnc;
} else {
Connect = NULL;
- TableName = NULL;
- Schema = NULL;
+ //TableName = NULL;
+ //Schema = NULL;
Ops.User = NULL;
Ops.Pwd = NULL;
- Catalog = NULL;
- Srcdef = NULL;
- Qrystr = NULL;
- Sep = 0;
- Options = 0;
+ //Catalog = NULL;
+ //Srcdef = NULL;
+ //Qrystr = NULL;
+ //Sep = 0;
+ //Options = 0;
Ops.Cto = DEFAULT_LOGIN_TIMEOUT;
Ops.Qto = DEFAULT_QUERY_TIMEOUT;
- Quoted = 0;
- Rows = 0;
+ //Quoted = 0;
+ //Rows = 0;
Catver = 0;
- Memory = 0;
- Scrollable = false;
+ //Memory = 0;
+ //Scrollable = false;
Ops.UseCnc = false;
} // endif tdp
- Quote = NULL;
- Query = NULL;
- Count = NULL;
+ //Quote = NULL;
+ //Query = NULL;
+ //Count = NULL;
//Where = NULL;
- MulConn = NULL;
- DBQ = NULL;
- Qrp = NULL;
- Fpos = 0;
- Curpos = 0;
- AftRows = 0;
- CurNum = 0;
- Rbuf = 0;
- BufSize = 0;
- Nparm = 0;
- Placed = false;
+ //MulConn = NULL;
+ //DBQ = NULL;
+ //Qrp = NULL;
+ //Fpos = 0;
+ //Curpos = 0;
+ //AftRows = 0;
+ //CurNum = 0;
+ //Rbuf = 0;
+ //BufSize = 0;
+ //Nparm = 0;
+ //Placed = false;
} // end of TDBODBC standard constructor
-TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBASE(tdbp)
+TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp)
{
Ocp = tdbp->Ocp; // is that right ?
Cnp = tdbp->Cnp;
Connect = tdbp->Connect;
- TableName = tdbp->TableName;
- Schema = tdbp->Schema;
+ //TableName = tdbp->TableName;
+ //Schema = tdbp->Schema;
Ops = tdbp->Ops;
- Catalog = tdbp->Catalog;
- Srcdef = tdbp->Srcdef;
- Qrystr = tdbp->Qrystr;
- Memory = tdbp->Memory;
- Scrollable = tdbp->Scrollable;
- Quote = tdbp->Quote;
- Query = tdbp->Query;
- Count = tdbp->Count;
+ //Catalog = tdbp->Catalog;
+ //Srcdef = tdbp->Srcdef;
+ //Qrystr = tdbp->Qrystr;
+ //Memory = tdbp->Memory;
+ //Scrollable = tdbp->Scrollable;
+ //Quote = tdbp->Quote;
+ //Query = tdbp->Query;
+ //Count = tdbp->Count;
//Where = tdbp->Where;
- MulConn = tdbp->MulConn;
- DBQ = tdbp->DBQ;
- Options = tdbp->Options;
- Quoted = tdbp->Quoted;
- Rows = tdbp->Rows;
- Fpos = 0;
- Curpos = 0;
- AftRows = 0;
- CurNum = 0;
- Rbuf = 0;
- BufSize = tdbp->BufSize;
- Nparm = tdbp->Nparm;
- Qrp = tdbp->Qrp;
- Placed = false;
+ //MulConn = tdbp->MulConn;
+ //DBQ = tdbp->DBQ;
+ //Options = tdbp->Options;
+ //Quoted = tdbp->Quoted;
+ //Rows = tdbp->Rows;
+ //Fpos = 0;
+ //Curpos = 0;
+ //AftRows = 0;
+ //CurNum = 0;
+ //Rbuf = 0;
+ //BufSize = tdbp->BufSize;
+ //Nparm = tdbp->Nparm;
+ //Qrp = tdbp->Qrp;
+ //Placed = false;
} // end of TDBODBC copy constructor
// Method
-PTDB TDBODBC::CopyOne(PTABS t)
+PTDB TDBODBC::Clone(PTABS t)
{
PTDB tp;
PODBCCOL cp1, cp2;
@@ -386,6 +389,7 @@ void TDBODBC::SetFile(PGLOBAL g, PSZ fn)
DBQ = fn;
} // end of SetFile
+#if 0
/******************************************************************/
/* Convert an UTF-8 string to latin characters. */
/******************************************************************/
@@ -414,7 +418,15 @@ bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt)
PCOL colp;
if (Srcdef) {
- Query = new(g)STRING(g, 0, Srcdef);
+ if (strstr(Srcdef, "%s")) {
+ char *fil;
+
+ fil = (To_CondFil) ? To_CondFil->Body : PlugDup(g, "1=1");
+ Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil));
+ Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil));
+ } else
+ Query = new(g)STRING(g, 0, Srcdef);
+
return false;
} // endif Srcdef
@@ -442,7 +454,8 @@ bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt)
} else
oom |= Query->Append(buf);
- } // endif colp
+ ((PEXTCOL)colp)->SetRank(++Ncol);
+ } // endif colp
} else
// !Columns can occur for queries such that sql count(*) from...
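The SELECT-list loop above now records each column's position through the generic EXTCOL::SetRank instead of writing the ODBC-specific Rank member directly, which is what allows the query-building code to be shared by the external table types. Reduced to the relevant part; the helper and the IsSpecial guard are assumptions about the elided portion of the loop:

void NumberResultColumns(PTDB tdbp)     // hypothetical helper
  {
  int Ncol = 0;

  for (PCOL colp = tdbp->GetColumns(); colp; colp = colp->GetNext())
    if (!colp->IsSpecial())
      ((PEXTCOL)colp)->SetRank(++Ncol); // 1-based position in the SELECT

  } // end of NumberResultColumns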
@@ -458,10 +471,6 @@ bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt)
if (Catalog && *Catalog)
catp = Catalog;
- // Following lines are commented because of MSDEV-10520
- // Indeed the schema in the tablep is the local table database and
- // is normally not related to the remote table database.
- // TODO: Try to remember why this was done and if it was useful in some case.
//if (tablep->GetSchema())
// schmp = (char*)tablep->GetSchema();
//else
@@ -516,6 +525,7 @@ bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt)
return false;
} // end of MakeSQL
+#endif // 0
/***********************************************************************/
/* MakeInsert: make the Insert statement used with ODBC connection. */
@@ -536,7 +546,7 @@ bool TDBODBC::MakeInsert(PGLOBAL g)
// Column name can be encoded in UTF-8
Decode(colp->GetName(), buf, sizeof(buf));
len += (strlen(buf) + 6); // comma + quotes + valist
- ((PODBCCOL)colp)->Rank = ++Nparm;
+ ((PEXTCOL)colp)->SetRank(++Nparm);
} // endif colp
// Below 32 is enough to contain the fixed part of the query
@@ -555,7 +565,7 @@ bool TDBODBC::MakeInsert(PGLOBAL g)
if (schmp)
len += strlen(schmp) + 1;
- // Column name can be encoded in UTF-8
+ // Table name can be encoded in UTF-8
Decode(TableName, buf, sizeof(buf));
len += (strlen(buf) + 32);
Query = new(g) STRING(g, len, "INSERT INTO ");
@@ -634,6 +644,7 @@ bool TDBODBC::BindParameters(PGLOBAL g)
return false;
} // end of BindParameters
+#if 0
/***********************************************************************/
/* MakeCommand: make the Update or Delete statement to send to the */
/* MySQL server. Limited to remote values and filtering. */
@@ -664,19 +675,20 @@ bool TDBODBC::MakeCommand(PGLOBAL g)
// If so, it must be quoted in the original query
strlwr(strcat(strcat(strcpy(name, " "), Name), " "));
- if (!strstr(" update delete low_priority ignore quick from ", name))
- strlwr(strcpy(name, Name)); // Not a keyword
- else
- strlwr(strcat(strcat(strcpy(name, qc), Name), qc));
+ if (strstr(" update delete low_priority ignore quick from ", name)) {
+ strlwr(strcat(strcat(strcpy(name, qc), Name), qc));
+ k += 2;
+ } else
+ strlwr(strcpy(name, Name)); // Not a keyword
if ((p = strstr(qrystr, name))) {
for (i = 0; i < p - qrystr; i++)
stmt[i] = (Qrystr[i] == '`') ? *qc : Qrystr[i];
stmt[i] = 0;
- k = i + (int)strlen(Name);
+ k += i + (int)strlen(Name);
- if (qtd && *(p-1) == ' ')
+ if (qtd && *(p - 1) == ' ')
strcat(strcat(strcat(stmt, qc), TableName), qc);
else
strcat(stmt, TableName);
@@ -692,15 +704,14 @@ bool TDBODBC::MakeCommand(PGLOBAL g)
} else {
sprintf(g->Message, "Cannot use this %s command",
- (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
- return false;
+ (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE");
+ return true;
} // endif p
Query = new(g) STRING(g, 0, stmt);
return (!Query->GetSize());
} // end of MakeCommand
-#if 0
/***********************************************************************/
/* MakeUpdate: make the SQL statement to send to ODBC connection. */
/***********************************************************************/
@@ -818,6 +829,7 @@ int TDBODBC::Cardinality(PGLOBAL g)
return Cardinal;
} // end of Cardinality
+#if 0
/***********************************************************************/
/* ODBC GetMaxSize: returns table size estimate in number of lines. */
/***********************************************************************/
@@ -844,6 +856,7 @@ int TDBODBC::GetProgMax(PGLOBAL g)
{
return GetMaxSize(g);
} // end of GetProgMax
+#endif // 0
/***********************************************************************/
/* ODBC Access Method opening routine. */
@@ -981,6 +994,7 @@ bool TDBODBC::OpenDB(PGLOBAL g)
return false;
} // end of OpenDB
+#if 0
/***********************************************************************/
/* GetRecpos: return the position of last read record. */
/***********************************************************************/
@@ -988,6 +1002,7 @@ int TDBODBC::GetRecpos(void)
{
return Fpos;
} // end of GetRecpos
+#endif // 0
/***********************************************************************/
/* SetRecpos: set the position of next read record. */
@@ -1081,8 +1096,9 @@ int TDBODBC::ReadDB(PGLOBAL g)
int rc;
if (trace > 1)
- htrc("ODBC ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n",
- GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex);
+ htrc("ODBC ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode);
+ //htrc("ODBC ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n",
+ // GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex);
if (Mode == MODE_UPDATE || Mode == MODE_DELETE) {
if (!Query && MakeCommand(g))
@@ -1102,11 +1118,11 @@ int TDBODBC::ReadDB(PGLOBAL g)
} // endif Mode
- if (To_Kindex) {
- // Direct access of ODBC tables is not implemented yet
- strcpy(g->Message, MSG(NO_ODBC_DIRECT));
- return RC_FX;
- } // endif To_Kindex
+ //if (To_Kindex) {
+ // // Direct access of ODBC tables is not implemented yet
+ // strcpy(g->Message, MSG(NO_ODBC_DIRECT));
+ // return RC_FX;
+ // } // endif To_Kindex
/*********************************************************************/
/* Now start the reading process. */
@@ -1212,70 +1228,58 @@ void TDBODBC::CloseDB(PGLOBAL g)
/* ODBCCOL public constructor. */
/***********************************************************************/
ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
- : COLBLK(cdp, tdbp, i)
+ : EXTCOL(cdp, tdbp, cprec, i, am)
{
- if (cprec) {
- Next = cprec->GetNext();
- cprec->SetNext(this);
- } else {
- Next = tdbp->GetColumns();
- tdbp->SetColumns(this);
- } // endif cprec
-
// Set additional ODBC access method information for column.
- Crp = NULL;
-//Long = cdp->GetLong();
- Long = Precision;
+//Crp = NULL;
+//Long = Precision;
//strcpy(F_Date, cdp->F_Date);
- To_Val = NULL;
+//To_Val = NULL;
Slen = 0;
StrLen = &Slen;
Sqlbuf = NULL;
- Bufp = NULL;
- Blkp = NULL;
- Rank = 0; // Not known yet
-
- if (trace)
- htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this);
-
+//Bufp = NULL;
+//Blkp = NULL;
+//Rank = 0; // Not known yet
} // end of ODBCCOL constructor
/***********************************************************************/
/* ODBCCOL private constructor. */
/***********************************************************************/
-ODBCCOL::ODBCCOL(void) : COLBLK()
+ODBCCOL::ODBCCOL(void) : EXTCOL()
{
- Crp = NULL;
- Buf_Type = TYPE_INT; // This is a count(*) column
- // Set additional Dos access method information for column.
- Long = sizeof(int);
- To_Val = NULL;
+//Crp = NULL;
+//Buf_Type = TYPE_INT; // This is a count(*) column
+//// Set additional Dos access method information for column.
+//Long = sizeof(int);
+//To_Val = NULL;
Slen = 0;
StrLen = &Slen;
Sqlbuf = NULL;
- Bufp = NULL;
- Blkp = NULL;
- Rank = 1;
+//Bufp = NULL;
+//Blkp = NULL;
+//Rank = 1;
} // end of ODBCCOL constructor
/***********************************************************************/
/* ODBCCOL constructor used for copying columns. */
/* tdbp is the pointer to the new table descriptor. */
/***********************************************************************/
-ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp)
+ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp)
{
- Crp = col1->Crp;
- Long = col1->Long;
+//Crp = col1->Crp;
+//Long = col1->Long;
//strcpy(F_Date, col1->F_Date);
- To_Val = col1->To_Val;
+//To_Val = col1->To_Val;
Slen = col1->Slen;
StrLen = col1->StrLen;
Sqlbuf = col1->Sqlbuf;
- Bufp = col1->Bufp;
- Blkp = col1->Blkp;
- Rank = col1->Rank;
+//Bufp = col1->Bufp;
+//Blkp = col1->Blkp;
+//Rank = col1->Rank;
} // end of ODBCCOL copy constructor
+#if 0
/***********************************************************************/
/* SetBuffer: prepare a column block for write operation. */
/***********************************************************************/
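ODBCCOL now inherits from EXTCOL, and the block that used to chain each new column into the table's column list (visible as removed lines above) disappears from this constructor. Presumably that chaining now happens once in the EXTCOL base constructor; the following is a reconstruction from the removed lines, not code copied from tabext.cpp:

// Reconstruction only: what the removed lines suggest the EXTCOL base
// constructor does for every external column type.
EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am)
      : COLBLK(cdp, tdbp, i)
  {
  if (cprec) {                        // insert after the previous column
    Next = cprec->GetNext();
    cprec->SetNext(this);
  } else {                            // or at the head of the column list
    Next = tdbp->GetColumns();
    tdbp->SetColumns(this);
  } // endif cprec

  To_Val = NULL;                      // (Bufp, Blkp, Long follow suit)
  Rank = 0;                           // not known yet

  if (trace)
    htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this);

  } // end of EXTCOL constructor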
@@ -1321,6 +1325,7 @@ bool ODBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
Status = (ok) ? BUF_EMPTY : BUF_NO;
return false;
} // end of SetBuffer
+#endif // 0
/***********************************************************************/
/* ReadColumn: when SQLFetch is used there is nothing to do as the */
@@ -1526,7 +1531,7 @@ TDBXDBC::TDBXDBC(PTDBXDBC tdbp) : TDBODBC(tdbp)
Nerr = tdbp->Nerr;
} // end of TDBXDBC copy constructor
-PTDB TDBXDBC::CopyOne(PTABS t)
+PTDB TDBXDBC::Clone(PTABS t)
{
PTDB tp;
PXSRCCOL cp1, cp2;
diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h
index aa6592d8abf..fcefad5647b 100644
--- a/storage/connect/tabodbc.h
+++ b/storage/connect/tabodbc.h
@@ -20,7 +20,7 @@ typedef class TDBSRC *PTDBSRC;
/***********************************************************************/
/* ODBC table. */
/***********************************************************************/
-class DllExport ODBCDEF : public TABDEF { /* Logical table description */
+class DllExport ODBCDEF : public EXTDEF { /* Logical table description */
friend class TDBODBC;
friend class TDBXDBC;
friend class TDBDRV;
@@ -33,14 +33,14 @@ public:
// Implementation
virtual const char *GetType(void) {return "ODBC";}
PSZ GetConnect(void) {return Connect;}
- PSZ GetTabname(void) {return Tabname;}
- PSZ GetTabschema(void) {return Tabschema;}
- PSZ GetTabcat(void) {return Tabcat;}
- PSZ GetSrcdef(void) {return Srcdef;}
- char GetSep(void) {return (Sep) ? *Sep : 0;}
- int GetQuoted(void) {return Quoted;}
+ //PSZ GetTabname(void) {return Tabname;}
+ //PSZ GetTabschema(void) {return Tabschema;}
+ //PSZ GetTabcat(void) {return Tabcat;}
+ //PSZ GetSrcdef(void) {return Srcdef;}
+ //char GetSep(void) {return (Sep) ? *Sep : 0;}
+ //int GetQuoted(void) {return Quoted;}
int GetCatver(void) {return Catver;}
- int GetOptions(void) {return Options;}
+ //int GetOptions(void) {return Options;}
// Methods
virtual int Indexable(void) {return 2;}
@@ -50,27 +50,27 @@ public:
protected:
// Members
PSZ Connect; /* ODBC connection string */
- PSZ Tabname; /* External table name */
- PSZ Tabschema; /* External table schema */
- PSZ Username; /* User connect name */
- PSZ Password; /* Password connect info */
- PSZ Tabcat; /* External table catalog */
- PSZ Tabtyp; /* Catalog table type */
- PSZ Colpat; /* Catalog column pattern */
- PSZ Srcdef; /* The source table SQL definition */
- PSZ Qchar; /* Identifier quoting character */
- PSZ Qrystr; /* The original query */
- PSZ Sep; /* Decimal separator */
+ //PSZ Tabname; /* External table name */
+ //PSZ Tabschema; /* External table schema */
+ //PSZ Username; /* User connect name */
+ //PSZ Password; /* Password connect info */
+ //PSZ Tabcat; /* External table catalog */
+ //PSZ Tabtyp; /* Catalog table type */
+ //PSZ Colpat; /* Catalog column pattern */
+ //PSZ Srcdef; /* The source table SQL definition */
+ //PSZ Qchar; /* Identifier quoting character */
+ //PSZ Qrystr; /* The original query */
+ //PSZ Sep; /* Decimal separator */
int Catver; /* ODBC version for catalog functions */
- int Options; /* Open connection options */
- int Cto; /* Open connection timeout */
- int Qto; /* Query (command) timeout */
- int Quoted; /* Identifier quoting level */
- int Maxerr; /* Maxerr for an Exec table */
- int Maxres; /* Maxres for a catalog table */
- int Memory; /* Put result set in memory */
- bool Scrollable; /* Use scrollable cursor */
- bool Xsrc; /* Execution type */
+ //int Options; /* Open connection options */
+ //int Cto; /* Open connection timeout */
+ //int Qto; /* Query (command) timeout */
+ //int Quoted; /* Identifier quoting level */
+ //int Maxerr; /* Maxerr for an Exec table */
+ //int Maxres; /* Maxres for a catalog table */
+ //int Memory; /* Put result set in memory */
+ //bool Scrollable; /* Use scrollable cursor */
+ //bool Xsrc; /* Execution type */
bool UseCnc; /* Use SQLConnect (!SQLDriverConnect) */
}; // end of ODBCDEF
@@ -81,7 +81,7 @@ public:
/* This is the ODBC Access Method class declaration for files from */
/* other DB drivers to be accessed via ODBC. */
/***********************************************************************/
-class TDBODBC : public TDBASE {
+class TDBODBC : public TDBEXT {
friend class ODBCCOL;
friend class ODBConn;
public:
@@ -95,8 +95,8 @@ class TDBODBC : public TDBASE {
{return (PTDB)new(g) TDBODBC(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
- virtual int GetRecpos(void);
+ virtual PTDB Clone(PTABS t);
+//virtual int GetRecpos(void);
virtual bool SetRecpos(PGLOBAL g, int recpos);
virtual PSZ GetFile(PGLOBAL g);
virtual void SetFile(PGLOBAL g, PSZ fn);
@@ -108,8 +108,8 @@ class TDBODBC : public TDBASE {
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
virtual int Cardinality(PGLOBAL g);
- virtual int GetMaxSize(PGLOBAL g);
- virtual int GetProgMax(PGLOBAL g);
+//virtual int GetMaxSize(PGLOBAL g);
+//virtual int GetProgMax(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -119,10 +119,10 @@ class TDBODBC : public TDBASE {
protected:
// Internal functions
- int Decode(char *utf, char *buf, size_t n);
- bool MakeSQL(PGLOBAL g, bool cnt);
+//int Decode(char *utf, char *buf, size_t n);
+//bool MakeSQL(PGLOBAL g, bool cnt);
bool MakeInsert(PGLOBAL g);
- bool MakeCommand(PGLOBAL g);
+//virtual bool MakeCommand(PGLOBAL g);
//bool MakeFilter(PGLOBAL g, bool c);
bool BindParameters(PGLOBAL g);
//char *MakeUpdate(PGLOBAL g);
@@ -132,46 +132,16 @@ class TDBODBC : public TDBASE {
ODBConn *Ocp; // Points to an ODBC connection class
ODBCCOL *Cnp; // Points to count(*) column
ODBCPARM Ops; // Additional parameters
- PSTRG Query; // Constructed SQL query
char *Connect; // Points to connection string
- char *TableName; // Points to ODBC table name
- char *Schema; // Points to ODBC table Schema
- char *User; // User connect info
- char *Pwd; // Password connect info
- char *Catalog; // Points to ODBC table Catalog
- char *Srcdef; // The source table SQL definition
- char *Count; // Points to count(*) SQL statement
-//char *Where; // Points to local where clause
- char *Quote; // The identifier quoting character
- char *MulConn; // Used for multiple ODBC tables
- char *DBQ; // The address part of Connect string
- char *Qrystr; // The original query
- char Sep; // The decimal separator
- int Options; // Connect options
- int Cto; // Connect timeout
- int Qto; // Query timeout
- int Quoted; // The identifier quoting level
- int Fpos; // Position of last read record
- int Curpos; // Cursor position of last fetch
- int AftRows; // The number of affected rows
- int Rows; // Rowset size
int Catver; // Catalog ODBC version
- int CurNum; // Current buffer line number
- int Rbuf; // Number of lines read in buffer
- int BufSize; // Size of connect string buffer
- int Nparm; // The number of statement parameters
- int Memory; // 0: No 1: Alloc 2: Put 3: Get
- bool Scrollable; // Use scrollable cursor
- bool Placed; // True for position reading
bool UseCnc; // Use SQLConnect (!SQLDriverConnect)
- PQRYRES Qrp; // Points to storage result
}; // end of class TDBODBC
/***********************************************************************/
/* Class ODBCCOL: ODBC access method column descriptor. */
/* This A.M. is used for ODBC tables. */
/***********************************************************************/
-class ODBCCOL : public COLBLK {
+class ODBCCOL : public EXTCOL {
friend class TDBODBC;
public:
// Constructors
@@ -181,12 +151,12 @@ class ODBCCOL : public COLBLK {
// Implementation
virtual int GetAmType(void) {return TYPE_AM_ODBC;}
SQLLEN *GetStrLen(void) {return StrLen;}
- int GetRank(void) {return Rank;}
+// int GetRank(void) {return Rank;}
// PVBLK GetBlkp(void) {return Blkp;}
- void SetCrp(PCOLRES crp) {Crp = crp;}
+// void SetCrp(PCOLRES crp) {Crp = crp;}
// Methods
- virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+//virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
virtual void ReadColumn(PGLOBAL g);
virtual void WriteColumn(PGLOBAL g);
void AllocateBuffers(PGLOBAL g, int rows);
@@ -195,19 +165,19 @@ class ODBCCOL : public COLBLK {
// void Print(PGLOBAL g, FILE *, uint);
protected:
- // Constructor used by GetMaxSize
+ // Constructor for count(*) column
ODBCCOL(void);
// Members
TIMESTAMP_STRUCT *Sqlbuf; // To get SQL_TIMESTAMP's
- PCOLRES Crp; // To storage result
- void *Bufp; // To extended buffer
- PVBLK Blkp; // To Value Block
+//PCOLRES Crp; // To storage result
+//void *Bufp; // To extended buffer
+//PVBLK Blkp; // To Value Block
//char F_Date[12]; // Internal Date format
- PVAL To_Val; // To value used for Insert
+//PVAL To_Val; // To value used for Insert
SQLLEN *StrLen; // As returned by ODBC
SQLLEN Slen; // Used with Fetch
- int Rank; // Rank (position) number in the query
+//int Rank; // Rank (position) number in the query
}; // end of class ODBCCOL
/***********************************************************************/
@@ -228,28 +198,19 @@ class TDBXDBC : public TDBODBC {
{return (PTDB)new(g) TDBXDBC(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
-//virtual int GetRecpos(void);
-//virtual PSZ GetFile(PGLOBAL g);
-//virtual void SetFile(PGLOBAL g, PSZ fn);
-//virtual void ResetSize(void);
-//virtual int GetAffectedRows(void) {return AftRows;}
-//virtual PSZ GetServer(void) {return "ODBC";}
+ virtual PTDB Clone(PTABS t);
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
-//virtual int GetProgMax(PGLOBAL g);
virtual int GetMaxSize(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
virtual int DeleteDB(PGLOBAL g, int irc);
-//virtual void CloseDB(PGLOBAL g);
protected:
// Internal functions
PCMD MakeCMD(PGLOBAL g);
-//bool BindParameters(PGLOBAL g);
// Members
PCMD Cmdlist; // The commands to execute
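The tabodbc.h hunks above move the driver-independent table and column state (connection naming, query text, rank, buffers) out of the ODBC classes and into the shared external-table bases EXTDEF, TDBEXT and EXTCOL declared in tabext.h. A minimal sketch of the pattern, using made-up names rather than the real CONNECT classes: the shared members are declared once in the base, and the derived constructor initializes only what is ODBC-specific.
    // Illustrative sketch only; ExtColBase/OdbcCol are stand-ins, not CONNECT classes.
    struct ExtColBase {
      ExtColBase(const char *name, int pos) : Name(name), Rank(0), Pos(pos) {}
      const char *Name;
      int         Rank;   // position in the generated query, shared by every driver
      int         Pos;
    }; // end of ExtColBase
    struct OdbcCol : public ExtColBase {
      OdbcCol(const char *name, int pos)
        : ExtColBase(name, pos), Slen(0), StrLen(&Slen) {}
      long  Slen;          // ODBC-specific: length as returned with SQLFetch
      long *StrLen;
    }; // end of OdbcCol
    int main() { OdbcCol c("id", 1); return c.Rank; }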
diff --git a/storage/connect/tabpivot.cpp b/storage/connect/tabpivot.cpp
index 256b454741c..c6d32884417 100644
--- a/storage/connect/tabpivot.cpp
+++ b/storage/connect/tabpivot.cpp
@@ -1,11 +1,11 @@
/************ TabPivot C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: TABPIVOT */
/* ------------- */
-/* Version 1.6 */
+/* Version 1.7 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -41,6 +41,7 @@
#include "global.h"
#include "plgdbsem.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabcol.h"
#include "colblk.h"
#include "tabmysql.h"
@@ -883,7 +884,7 @@ SRCCOL::SRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int n)
/***********************************************************************/
/* Initialize the column as pointing to the source column. */
/***********************************************************************/
-bool SRCCOL::Init(PGLOBAL g, PTDBASE tp)
+bool SRCCOL::Init(PGLOBAL g, PTDB tp)
{
if (PRXCOL::Init(g, tp))
return true;
diff --git a/storage/connect/tabpivot.h b/storage/connect/tabpivot.h
index c397af05234..07d5c3e456b 100644
--- a/storage/connect/tabpivot.h
+++ b/storage/connect/tabpivot.h
@@ -183,7 +183,7 @@ class SRCCOL : public PRXCOL {
using PRXCOL::Init;
virtual void Reset(void) {}
void SetColumn(void);
- virtual bool Init(PGLOBAL g, PTDBASE tp);
+ virtual bool Init(PGLOBAL g, PTDB tp);
bool CompareLast(void);
protected:
diff --git a/storage/connect/tabsys.cpp b/storage/connect/tabsys.cpp
index 76890e84429..2ddd1c3c753 100644
--- a/storage/connect/tabsys.cpp
+++ b/storage/connect/tabsys.cpp
@@ -159,7 +159,7 @@ TDBINI::TDBINI(PTDBINI tdbp) : TDBASE(tdbp)
} // end of TDBINI copy constructor
// Is this really useful ???
-PTDB TDBINI::CopyOne(PTABS t)
+PTDB TDBINI::Clone(PTABS t)
{
PTDB tp;
PINICOL cp1, cp2;
@@ -173,7 +173,7 @@ PTDB TDBINI::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Get the section list from the INI file. */
@@ -565,7 +565,7 @@ TDBXIN::TDBXIN(PTDBXIN tdbp) : TDBINI(tdbp)
} // end of TDBXIN copy constructor
// Is this really useful ???
-PTDB TDBXIN::CopyOne(PTABS t)
+PTDB TDBXIN::Clone(PTABS t)
{
PTDB tp;
PXINCOL cp1, cp2;
@@ -579,7 +579,7 @@ PTDB TDBXIN::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Get the key list from the INI file. */
diff --git a/storage/connect/tabsys.h b/storage/connect/tabsys.h
index 6b454322906..ff1b8335690 100644
--- a/storage/connect/tabsys.h
+++ b/storage/connect/tabsys.h
@@ -57,7 +57,7 @@ class TDBINI : public TDBASE {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBINI(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void) {return N;}
virtual int GetProgCur(void) {return N;}
//virtual int GetAffectedRows(void) {return 0;}
@@ -136,7 +136,7 @@ class TDBXIN : public TDBINI {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBXIN(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void);
virtual bool SetRecpos(PGLOBAL g, int recpos);
virtual void ResetDB(void)
diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp
index e3baf7c3da5..0bf3f6beb43 100644
--- a/storage/connect/tabtbl.cpp
+++ b/storage/connect/tabtbl.cpp
@@ -1,11 +1,11 @@
/************* TabTbl C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABTBL */
/* ------------- */
-/* Version 1.7 */
+/* Version 1.8 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to PlugDB Software Development 2008-2016 */
+/* (C) Copyright to PlugDB Software Development 2008-2017 */
/* Author: Olivier BERTRAND */
/* */
/* WHAT THIS PROGRAM DOES: */
@@ -70,6 +70,7 @@
#include "tabcol.h"
#include "tabdos.h" // TDBDOS and DOSCOL class dcls
#include "tabtbl.h"
+#include "tabext.h"
#include "tabmysql.h"
#include "ha_connect.h"
@@ -411,9 +412,9 @@ void TDBTBL::ResetDB(void)
colp->COLBLK::Reset();
for (PTABLE tabp = Tablist; tabp; tabp = tabp->GetNext())
- ((PTDBASE)tabp->GetTo_Tdb())->ResetDB();
+ tabp->GetTo_Tdb()->ResetDB();
- Tdbp = (PTDBASE)Tablist->GetTo_Tdb();
+ Tdbp = Tablist->GetTo_Tdb();
Crp = 0;
} // end of ResetDB
@@ -458,7 +459,7 @@ bool TDBTBL::OpenDB(PGLOBAL g)
return TRUE;
if ((CurTable = Tablist)) {
- Tdbp = (PTDBASE)CurTable->GetTo_Tdb();
+ Tdbp = CurTable->GetTo_Tdb();
// Tdbp->SetMode(Mode);
// Tdbp->ResetDB();
// Tdbp->ResetSize();
@@ -515,7 +516,7 @@ int TDBTBL::ReadDB(PGLOBAL g)
/* Continue reading from next table file. */
/***************************************************************/
Tdbp->CloseDB(g);
- Tdbp = (PTDBASE)CurTable->GetTo_Tdb();
+ Tdbp = CurTable->GetTo_Tdb();
// Check and initialize the subtable columns
for (PCOL cp = Columns; cp; cp = cp->GetNext())
@@ -609,13 +610,13 @@ void TDBTBM::ResetDB(void)
// Local tables
for (PTABLE tabp = Tablist; tabp; tabp = tabp->GetNext())
- ((PTDBASE)tabp->GetTo_Tdb())->ResetDB();
+ tabp->GetTo_Tdb()->ResetDB();
// Remote tables
for (PTBMT tp = Tmp; tp; tp = tp->Next)
- ((PTDBASE)tp->Tap->GetTo_Tdb())->ResetDB();
+ tp->Tap->GetTo_Tdb()->ResetDB();
- Tdbp = (Tablist) ? (PTDBASE)Tablist->GetTo_Tdb() : NULL;
+ Tdbp = (Tablist) ? Tablist->GetTo_Tdb() : NULL;
Crp = 0;
} // end of ResetDB
@@ -716,7 +717,7 @@ bool TDBTBM::OpenDB(PGLOBAL g)
/* Proceed with local tables. */
/*********************************************************************/
if ((CurTable = Tablist)) {
- Tdbp = (PTDBASE)CurTable->GetTo_Tdb();
+ Tdbp = CurTable->GetTo_Tdb();
// Tdbp->SetMode(Mode);
// Check and initialize the subtable columns
@@ -808,7 +809,7 @@ int TDBTBM::ReadNextRemote(PGLOBAL g)
} // endif Curtable
- Tdbp = (PTDBASE)Cmp->Tap->GetTo_Tdb();
+ Tdbp = Cmp->Tap->GetTo_Tdb();
// Check and initialize the subtable columns
for (PCOL cp = Columns; cp; cp = cp->GetNext())
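The casts dropped in tabtbl.cpp rely on methods such as ResetDB() now being declared on the TDB base class itself (see the xtable.h hunks further down), so the plain pointer returned by GetTo_Tdb() is enough. A small stand-alone sketch of that pattern with hypothetical class names:
    // Sketch only: once the virtual lives on the base, the caller needs no downcast.
    struct TdbBase { virtual void ResetDB() {} virtual ~TdbBase() {} };
    struct TdbSub : public TdbBase { void ResetDB() override {} };
    static void reset_all(TdbBase *tdbp) {
      // before: ((TdbSub*)tdbp)->ResetDB();
      tdbp->ResetDB();     // after: direct virtual call through the base pointer
    } // end of reset_all
    int main() { TdbSub t; reset_all(&t); return 0; }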
diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp
index 4069cdbed2a..762c61bd1a1 100644
--- a/storage/connect/tabutil.cpp
+++ b/storage/connect/tabutil.cpp
@@ -1,7 +1,7 @@
/************* Tabutil cpp Declares Source Code File (.CPP) ************/
-/* Name: TABUTIL.CPP Version 1.1 */
+/* Name: TABUTIL.CPP Version 1.2 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2013 - 2016 */
+/* (C) Copyright to the author Olivier BERTRAND 2013 - 2017 */
/* */
/* Utility function used by the PROXY, XCOL, OCCUR, and TBL tables. */
/***********************************************************************/
@@ -45,8 +45,9 @@
#include "myutil.h"
#include "valblk.h"
#include "resource.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "xtable.h"
+#include "tabext.h"
#include "tabmysql.h"
#include "tabcol.h"
#include "tabutil.h"
@@ -356,7 +357,7 @@ TDBPRX::TDBPRX(PTDBPRX tdbp) : TDBASE(tdbp)
} // end of TDBPRX copy constructor
// Method
-PTDB TDBPRX::CopyOne(PTABS t)
+PTDB TDBPRX::Clone(PTABS t)
{
PTDB tp;
PPRXCOL cp1, cp2;
@@ -370,12 +371,12 @@ PTDB TDBPRX::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Get the PTDB of the sub-table. */
/***********************************************************************/
-PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b)
+PTDB TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b)
{
const char *sp = NULL;
char *db, *name;
@@ -456,13 +457,13 @@ PTDBASE TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b)
if (trace && tdbp)
htrc("Subtable %s in %s\n",
- name, SVP(((PTDBASE)tdbp)->GetDef()->GetDB()));
+ name, SVP(tdbp->GetDef()->GetDB()));
err:
if (s)
free_table_share(s);
- return (PTDBASE)tdbp;
+ return tdbp;
} // end of GetSubTable
/***********************************************************************/
@@ -560,9 +561,9 @@ bool TDBPRX::OpenDB(PGLOBAL g)
/* its column blocks in mode write (required by XML tables). */
/*********************************************************************/
if (Mode == MODE_UPDATE) {
- PTDBASE utp;
+ PTDB utp;
- if (!(utp= (PTDBASE)Tdbp->Duplicate(g))) {
+ if (!(utp= Tdbp->Duplicate(g))) {
sprintf(g->Message, MSG(INV_UPDT_TABLE), Tdbp->GetName());
return true;
} // endif tp
@@ -681,7 +682,7 @@ char *PRXCOL::Decode(PGLOBAL g, const char *cnm)
/* PRXCOL initialization routine. */
/* Look for the matching column in the object table. */
/***********************************************************************/
-bool PRXCOL::Init(PGLOBAL g, PTDBASE tp)
+bool PRXCOL::Init(PGLOBAL g, PTDB tp)
{
if (!tp)
tp = ((PTDBPRX)To_Tdb)->Tdbp;
diff --git a/storage/connect/tabutil.h b/storage/connect/tabutil.h
index b320d169b36..8e56aecff86 100644
--- a/storage/connect/tabutil.h
+++ b/storage/connect/tabutil.h
@@ -67,7 +67,7 @@ class DllExport TDBPRX : public TDBASE {
{return (PTDB)new(g) TDBPRX(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void) {return Tdbp->GetRecpos();}
virtual void ResetDB(void) {Tdbp->ResetDB();}
virtual int RowNumber(PGLOBAL g, bool b = FALSE);
@@ -83,12 +83,12 @@ class DllExport TDBPRX : public TDBASE {
virtual int WriteDB(PGLOBAL g);
virtual int DeleteDB(PGLOBAL g, int irc);
virtual void CloseDB(PGLOBAL g) {if (Tdbp) Tdbp->CloseDB(g);}
- PTDBASE GetSubTable(PGLOBAL g, PTABLE tabp, bool b = false);
+ PTDB GetSubTable(PGLOBAL g, PTABLE tabp, bool b = false);
void RemoveNext(PTABLE tp);
protected:
// Members
- PTDBASE Tdbp; // The object table
+ PTDB Tdbp; // The object table
}; // end of class TDBPRX
/***********************************************************************/
@@ -115,7 +115,7 @@ class DllExport PRXCOL : public COLBLK {
{return false;}
virtual void ReadColumn(PGLOBAL g);
virtual void WriteColumn(PGLOBAL g);
- virtual bool Init(PGLOBAL g, PTDBASE tp);
+ virtual bool Init(PGLOBAL g, PTDB tp);
protected:
char *Decode(PGLOBAL g, const char *cnm);
diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp
index e788529075f..282fb55a43c 100644
--- a/storage/connect/tabvct.cpp
+++ b/storage/connect/tabvct.cpp
@@ -241,7 +241,7 @@ PTDB VCTDEF::GetTable(PGLOBAL g, MODE mode)
/*********************************************************************/
if (mode != MODE_INSERT)
if (tdbp->GetBlockValues(g))
- PushWarning(g, (PTDBASE)tdbp);
+ PushWarning(g, tdbp);
// return NULL; // causes a crash when deleting index
return tdbp;
@@ -263,7 +263,7 @@ TDBVCT::TDBVCT(PGLOBAL g, PTDBVCT tdbp) : TDBFIX(g, tdbp)
} // end of TDBVCT copy constructor
// Method
-PTDB TDBVCT::CopyOne(PTABS t)
+PTDB TDBVCT::Clone(PTABS t)
{
PTDB tp;
PVCTCOL cp1, cp2;
@@ -277,7 +277,7 @@ PTDB TDBVCT::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate VCT column description block. */
diff --git a/storage/connect/tabvct.h b/storage/connect/tabvct.h
index 8ad3c8e21be..189a9ae2221 100644
--- a/storage/connect/tabvct.h
+++ b/storage/connect/tabvct.h
@@ -68,7 +68,7 @@ class DllExport TDBVCT : public TDBFIX {
bool IsSplit(void) {return ((VCTDEF*)To_Def)->Split;}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual bool IsUsingTemp(PGLOBAL g);
// Database routines
diff --git a/storage/connect/tabvir.cpp b/storage/connect/tabvir.cpp
index 356fc981357..155c71fe268 100644
--- a/storage/connect/tabvir.cpp
+++ b/storage/connect/tabvir.cpp
@@ -20,7 +20,7 @@
#include "plgdbsem.h"
#include "filter.h"
#include "xtable.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "colblk.h"
#include "mycat.h" // for FNC_COL
#include "tabvir.h"
diff --git a/storage/connect/tabwmi.cpp b/storage/connect/tabwmi.cpp
index 98a44b9d635..4871a1d66dc 100644
--- a/storage/connect/tabwmi.cpp
+++ b/storage/connect/tabwmi.cpp
@@ -1,5 +1,5 @@
/***********************************************************************/
-/* TABWMI: Author Olivier Bertrand -- PlugDB -- 2012 - 2013 */
+/* TABWMI: Author Olivier Bertrand -- PlugDB -- 2012 - 2017 */
/* TABWMI: Virtual table to get WMI information. */
/***********************************************************************/
#if !defined(__WIN__)
@@ -11,8 +11,9 @@
#include "global.h"
#include "plgdbsem.h"
#include "mycat.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "xtable.h"
+#include "tabext.h"
#include "colblk.h"
//#include "filter.h"
//#include "xindex.h"
@@ -62,7 +63,7 @@ PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname)
if (FAILED(res)) {
sprintf(g->Message, "Failed to initialize COM library. "
- "Error code = %p", res);
+ "Error code = %x", res);
return NULL;
} // endif res
@@ -85,7 +86,7 @@ PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname)
(void**) &loc);
if (FAILED(res)) {
sprintf(g->Message, "Failed to create Locator. "
- "Error code = %p", res);
+ "Error code = %x", res);
CoUninitialize();
return NULL;
} // endif res
@@ -94,7 +95,7 @@ PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname)
NULL, NULL, NULL, 0, NULL, NULL, &wp->Svc);
if (FAILED(res)) {
- sprintf(g->Message, "Could not connect. Error code = %p", res);
+ sprintf(g->Message, "Could not connect. Error code = %x", res);
loc->Release();
CoUninitialize();
return NULL;
@@ -423,7 +424,7 @@ bool TDBWMI::Initialize(PGLOBAL g)
if (FAILED(Res)) {
sprintf(g->Message, "Failed to initialize COM library. "
- "Error code = %p", Res);
+ "Error code = %x", Res);
return true; // Program has failed.
} // endif Res
@@ -436,7 +437,7 @@ bool TDBWMI::Initialize(PGLOBAL g)
if (FAILED(Res)) {
sprintf(g->Message, "Failed to create Locator. "
- "Error code = %p", Res);
+ "Error code = %x", Res);
CoUninitialize();
return true; // Program has failed.
} // endif Res
@@ -448,7 +449,7 @@ bool TDBWMI::Initialize(PGLOBAL g)
NULL, NULL,0, NULL, 0, 0, &Svc);
if (FAILED(Res)) {
- sprintf(g->Message, "Could not connect. Error code = %p", Res);
+ sprintf(g->Message, "Could not connect. Error code = %x", Res);
loc->Release();
CoUninitialize();
return true; // Program has failed.
@@ -463,7 +464,7 @@ bool TDBWMI::Initialize(PGLOBAL g)
RPC_C_IMP_LEVEL_IMPERSONATE, NULL, EOAC_NONE);
if (FAILED(Res)) {
- sprintf(g->Message, "Could not set proxy. Error code = 0x", Res);
+ sprintf(g->Message, "Could not set proxy. Error code = %x", Res);
Svc->Release();
CoUninitialize();
return true; // Program has failed.
@@ -573,7 +574,7 @@ bool TDBWMI::GetWMIInfo(PGLOBAL g)
NULL, &Enumerator);
if (FAILED(Rc)) {
- sprintf(g->Message, "Query %s failed. Error code = %p", cmd, Rc);
+ sprintf(g->Message, "Query %s failed. Error code = %x", cmd, Rc);
Svc->Release();
CoUninitialize();
return true; // Program has failed.
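The tabwmi.cpp hunks above switch the format specifier from %p to %x when reporting COM/WMI HRESULT values, because the status code is an integer rather than a pointer. A minimal stand-alone sketch of the idea; this example stores the code in an unsigned long and prints it with %lx for portability, while the patch itself keeps the native HRESULT with %x:
    #include <cstdio>
    int main() {
      unsigned long res = 0x80004005UL;   // stand-in for a failed HRESULT (E_FAIL)
      char msg[80];
      std::snprintf(msg, sizeof msg, "Could not connect. Error code = %lx", res);
      std::puts(msg);
      return 0;
    }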
diff --git a/storage/connect/tabxcl.cpp b/storage/connect/tabxcl.cpp
index add61431493..93a24accc3c 100644
--- a/storage/connect/tabxcl.cpp
+++ b/storage/connect/tabxcl.cpp
@@ -1,7 +1,7 @@
/************* TabXcl CPP Declares Source Code File (.CPP) *************/
/* Name: TABXCL.CPP Version 1.0 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2013 */
+/* (C) Copyright to the author Olivier BERTRAND 2013-2017 */
/* */
/* XCOL: Table having one column containing several values */
/* comma separated. When creating the table, the name of the X */
@@ -45,12 +45,12 @@
#include "plgdbsem.h"
#include "plgcnx.h" // For DB types
#include "resource.h"
-#include "reldef.h"
+#include "xtable.h"
+#include "tabext.h"
#include "filamtxt.h"
#include "tabdos.h"
#include "tabcol.h"
#include "tabxcl.h"
-#include "xtable.h"
#include "tabmysql.h"
#include "ha_connect.h"
@@ -246,7 +246,7 @@ XCLCOL::XCLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i)
/* XCLCOL initialization routine. */
/* Allocate Cbuf that will contain the Colp value. */
/***********************************************************************/
-bool XCLCOL::Init(PGLOBAL g, PTDBASE tp)
+bool XCLCOL::Init(PGLOBAL g, PTDB tp)
{
if (PRXCOL::Init(g, tp))
return true;
diff --git a/storage/connect/tabxcl.h b/storage/connect/tabxcl.h
index 291f0b4263a..fde000ee709 100644
--- a/storage/connect/tabxcl.h
+++ b/storage/connect/tabxcl.h
@@ -91,7 +91,7 @@ class XCLCOL : public PRXCOL {
using PRXCOL::Init;
virtual void Reset(void) {} // Evaluated only by TDBXCL
virtual void ReadColumn(PGLOBAL g);
- virtual bool Init(PGLOBAL g, PTDBASE tp = NULL);
+ virtual bool Init(PGLOBAL g, PTDB tp = NULL);
protected:
// Default constructor not to be used
diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp
index 3b8229fcf51..52cf3d3812f 100644
--- a/storage/connect/tabxml.cpp
+++ b/storage/connect/tabxml.cpp
@@ -42,7 +42,7 @@
/***********************************************************************/
#include "global.h"
#include "plgdbsem.h"
-#include "reldef.h"
+//#include "reldef.h"
#include "xtable.h"
#include "colblk.h"
#include "mycat.h"
@@ -537,6 +537,11 @@ PTDB XMLDEF::GetTable(PGLOBAL g, MODE m)
if (Catfunc == FNC_COL)
return new(g) TDBXCT(this);
+ if (Zipped && !(m == MODE_READ || m == MODE_ANY)) {
+ strcpy(g->Message, "Zipped XML tables are read only");
+ return NULL;
+ } // endif Zipped
+
PTDBASE tdbp = new(g) TDBXML(this);
if (Multiple)
@@ -655,7 +660,7 @@ TDBXML::TDBXML(PTDBXML tdbp) : TDBASE(tdbp)
} // end of TDBXML copy constructor
// Used for update
-PTDB TDBXML::CopyOne(PTABS t)
+PTDB TDBXML::Clone(PTABS t)
{
PTDB tp;
PXMLCOL cp1, cp2;
@@ -669,7 +674,7 @@ PTDB TDBXML::CopyOne(PTABS t)
} // endfor cp1
return tp;
- } // end of CopyOne
+ } // end of Clone
/***********************************************************************/
/* Allocate XML column description block. */
@@ -926,7 +931,7 @@ bool TDBXML::Initialize(PGLOBAL g)
if (rc)
sprintf(g->Message, "%s: %s", MSG(COM_ERROR), buf);
else
- sprintf(g->Message, "%s hr=%p", MSG(COM_ERROR), e.Error());
+ sprintf(g->Message, "%s hr=%x", MSG(COM_ERROR), e.Error());
goto error;
#endif // __WIN__
diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h
index 6c586d79dec..65b353072cb 100644
--- a/storage/connect/tabxml.h
+++ b/storage/connect/tabxml.h
@@ -71,7 +71,7 @@ class DllExport TDBXML : public TDBASE {
virtual PTDB Duplicate(PGLOBAL g) {return (PTDB)new(g) TDBXML(this);}
// Methods
- virtual PTDB CopyOne(PTABS t);
+ virtual PTDB Clone(PTABS t);
virtual int GetRecpos(void);
virtual int GetProgCur(void) {return N;}
virtual PSZ GetFile(PGLOBAL g) {return Xfile;}
diff --git a/storage/connect/tabzip.cpp b/storage/connect/tabzip.cpp
index 11f414ee154..b91059a3843 100644
--- a/storage/connect/tabzip.cpp
+++ b/storage/connect/tabzip.cpp
@@ -70,8 +70,12 @@ PCOL TDBZIP::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
/* param: filename path and the filename of the zip file to open. */
/* return: true if open, false otherwise. */
/***********************************************************************/
-bool TDBZIP::open(PGLOBAL g, const char *filename)
+bool TDBZIP::open(PGLOBAL g, const char *fn)
{
+ char filename[_MAX_PATH];
+
+ PlugSetPath(filename, fn, GetPath());
+
if (!zipfile && !(zipfile = unzOpen64(filename)))
sprintf(g->Message, "Zipfile open error");
@@ -102,7 +106,7 @@ int TDBZIP::Cardinality(PGLOBAL g)
unz_global_info64 ginfo;
int err = unzGetGlobalInfo64(zipfile, &ginfo);
- Cardinal = (err == UNZ_OK) ? ginfo.number_entry : 0;
+ Cardinal = (err == UNZ_OK) ? (int)ginfo.number_entry : 0;
} else
Cardinal = 0;
@@ -221,6 +225,14 @@ void ZIPCOL::ReadColumn(PGLOBAL g)
case 3:
Value->SetValue((int)Tdbz->finfo.compression_method);
break;
+ case 4:
+ Tdbz->finfo.tmu_date.tm_year -= 1900;
+
+ if (((DTVAL*)Value)->MakeTime((tm*)&Tdbz->finfo.tmu_date))
+ Value->SetNull(true);
+
+ Tdbz->finfo.tmu_date.tm_year += 1900;
+ break;
default:
Value->SetValue_psz((PSZ)Tdbz->fn);
} // endswitch flag
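The new case 4 above formats the ZIP entry's modification date. The minizip date structure carries the calendar year (and a 0-based month), while it is handed to MakeTime as a struct tm, which counts years from 1900, hence the temporary -= 1900 / += 1900 around the call. A small stand-alone sketch of the same conversion, with std::mktime standing in for the CONNECT MakeTime helper:
    #include <ctime>
    #include <cstdio>
    int main() {
      // Field layout modeled on minizip's tm_unz (calendar year, month 0-11).
      struct { unsigned sec, min, hour, mday, mon, year; } tmu = {0, 30, 12, 8, 2, 2017};
      std::tm t = {};
      t.tm_sec  = (int)tmu.sec;
      t.tm_min  = (int)tmu.min;
      t.tm_hour = (int)tmu.hour;
      t.tm_mday = (int)tmu.mday;
      t.tm_mon  = (int)tmu.mon;          // already 0-based, like struct tm
      t.tm_year = (int)tmu.year - 1900;  // struct tm counts years since 1900
      std::time_t when = std::mktime(&t);
      std::printf("epoch seconds: %lld\n", (long long)when);
      return 0;
    }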
diff --git a/storage/connect/tabzip.h b/storage/connect/tabzip.h
index 6f1735258e7..dcec3475371 100644
--- a/storage/connect/tabzip.h
+++ b/storage/connect/tabzip.h
@@ -20,7 +20,7 @@ typedef class ZIPCOL *PZIPCOL;
/***********************************************************************/
class DllExport ZIPDEF : public DOSDEF { /* Table description */
friend class TDBZIP;
- friend class ZIPFAM;
+ friend class UNZFAM;
public:
// Constructor
ZIPDEF(void) {}
diff --git a/storage/connect/value.h b/storage/connect/value.h
index a670ade4c28..14a568c3549 100644
--- a/storage/connect/value.h
+++ b/storage/connect/value.h
@@ -271,7 +271,7 @@ class DllExport TYPVAL<PSZ>: public VALUE {
virtual void Reset(void) {*Strp = 0;}
virtual int GetValLen(void) {return Len;};
virtual int GetValPrec() {return (Ci) ? 1 : 0;}
- virtual int GetSize(void) {return (Strp) ? strlen(Strp) : 0;}
+ virtual int GetSize(void) {return (Strp) ? (int)strlen(Strp) : 0;}
virtual PSZ GetCharValue(void) {return Strp;}
virtual char GetTinyValue(void);
virtual uchar GetUTinyValue(void);
diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp
index a2cf4e77b80..15fb71ab88a 100755
--- a/storage/connect/xindex.cpp
+++ b/storage/connect/xindex.cpp
@@ -81,7 +81,7 @@ int PlgMakeIndex(PGLOBAL g, PSZ name, PIXDEF pxdf, bool add)
{
int rc;
PTABLE tablep;
- PTDBASE tdbp;
+ PTDB tdbp;
PCATLG cat = PlgGetCatalog(g, true);
/*********************************************************************/
@@ -89,12 +89,12 @@ int PlgMakeIndex(PGLOBAL g, PSZ name, PIXDEF pxdf, bool add)
/*********************************************************************/
tablep = new(g) XTAB(name);
- if (!(tdbp = (PTDBASE)cat->GetTable(g, tablep)))
+ if (!(tdbp = cat->GetTable(g, tablep)))
rc = RC_NF;
else if (!tdbp->GetDef()->Indexable()) {
sprintf(g->Message, MSG(TABLE_NO_INDEX), name);
rc = RC_NF;
- } else if ((rc = tdbp->MakeIndex(g, pxdf, add)) == RC_INFO)
+ } else if ((rc = ((PTDBASE)tdbp)->MakeIndex(g, pxdf, add)) == RC_INFO)
rc = RC_OK; // No or remote index
return rc;
@@ -2738,7 +2738,7 @@ bool XHUGE::Read(PGLOBAL g, void *buf, int n, int size)
} // endif nbr
} else {
- char *buf[256];
+ char buf[256];
DWORD drc = GetLastError();
FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM |
diff --git a/storage/connect/xindex.h b/storage/connect/xindex.h
index ef2e934e5ee..2d10d72722e 100644
--- a/storage/connect/xindex.h
+++ b/storage/connect/xindex.h
@@ -184,7 +184,7 @@ class DllExport XXBASE : public CSORT, public BLOCK {
virtual bool IsRandom(void) {return true;}
virtual bool IsDynamic(void) {return Dynamic;}
virtual void SetDynamic(bool dyn) {Dynamic = dyn;}
- virtual bool HaveSame(void) {return false;}
+//virtual bool HaveSame(void) {return false;}
virtual int GetCurPos(void) {return Cur_K;}
virtual void SetNval(int n) {assert(n == 1);}
virtual void SetOp(OPVAL op) {Op = op;}
@@ -256,7 +256,7 @@ class DllExport XINDEX : public XXBASE {
// Implementation
virtual IDT GetType(void) {return TYPE_IDX_INDX;}
virtual bool IsMul(void) {return (Nval < Nk) ? true : Mul;}
- virtual bool HaveSame(void) {return Op == OP_SAME;}
+//virtual bool HaveSame(void) {return Op == OP_SAME;}
virtual int GetCurPos(void) {return (Pex) ? Pex[Cur_K] : Cur_K;}
virtual void SetNval(int n) {Nval = n;}
int GetMaxSame(void) {return MaxSame;}
diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h
index d78cd09f9a4..8f6c23c4aeb 100644
--- a/storage/connect/xobject.h
+++ b/storage/connect/xobject.h
@@ -127,7 +127,8 @@ class DllExport STRING : public BLOCK {
// Implementation
inline int GetLength(void) {return (int)Length;}
- inline PSZ GetStr(void) {return Strp;}
+ inline void SetLength(uint n) {Length = n;}
+ inline PSZ GetStr(void) {return Strp;}
inline uint32 GetSize(void) {return Size;}
// Methods
diff --git a/storage/connect/xtable.h b/storage/connect/xtable.h
index e18a08a54b8..4aeea05946a 100644
--- a/storage/connect/xtable.h
+++ b/storage/connect/xtable.h
@@ -1,7 +1,7 @@
/**************** Table H Declares Source Code File (.H) ***************/
-/* Name: TABLE.H Version 2.3 */
+/* Name: TABLE.H Version 2.4 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1999-2015 */
+/* (C) Copyright to the author Olivier BERTRAND 1999-2017 */
/* */
/* This file contains the TBX, OPJOIN and TDB class definitions. */
/***********************************************************************/
@@ -17,6 +17,7 @@
#include "block.h"
#include "colblk.h"
#include "m_ctype.h"
+#include "reldef.h"
typedef class CMD *PCMD;
typedef struct st_key_range key_range;
@@ -32,24 +33,30 @@ class CMD : public BLOCK {
char *Cmd;
}; // end of class CMD
+#if 0
// Condition filter structure
class CONDFIL : public BLOCK {
public:
// Constructor
CONDFIL(const Item *cond, uint idx, AMT type)
{
- Cond = cond; Idx = idx; Type = type; Body = NULL; Op = OP_XX; Cmds = NULL;
+ Cond = cond; Idx = idx; Type = type; Op = OP_XX;
+ Cmds = NULL; All = true; Body = NULL, Having = NULL;
}
// Members
const Item *Cond;
AMT Type;
uint Idx;
- char *Body;
OPVAL Op;
PCMD Cmds;
+ bool All;
+ char *Body;
+ char *Having;
}; // end of class CONDFIL
+#endif // 0
+typedef class EXTCOL *PEXTCOL;
typedef class CONDFIL *PCFIL;
typedef class TDBCAT *PTDBCAT;
typedef class CATCOL *PCATCOL;
@@ -64,47 +71,61 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
TDB(PTDB tdbp);
// Implementation
- static void SetTnum(int n) {Tnum = n;}
- inline PTDB GetOrig(void) {return To_Orig;}
- inline TUSE GetUse(void) {return Use;}
- inline PCFIL GetCondFil(void) {return To_CondFil;}
- inline LPCSTR GetName(void) {return Name;}
- inline PTABLE GetTable(void) {return To_Table;}
- inline PCOL GetColumns(void) {return Columns;}
- inline int GetDegree(void) {return Degree;}
- inline MODE GetMode(void) {return Mode;}
- inline PFIL GetFilter(void) {return To_Filter;}
- inline void SetFilter(PFIL fp) {To_Filter = fp;}
- inline void SetOrig(PTDB txp) {To_Orig = txp;}
- inline void SetUse(TUSE n) {Use = n;}
- inline void SetCondFil(PCFIL cfp) {To_CondFil = cfp;}
- inline void SetNext(PTDB tdbp) {Next = tdbp;}
- inline void SetName(LPCSTR name) {Name = name;}
- inline void SetTable(PTABLE tablep) {To_Table = tablep;}
- inline void SetColumns(PCOL colp) {Columns = colp;}
- inline void SetDegree(int degree) {Degree = degree;}
- inline void SetMode(MODE mode) {Mode = mode;}
+ static void SetTnum(int n) {Tnum = n;}
+ inline PTABDEF GetDef(void) {return To_Def;}
+ inline PTDB GetOrig(void) {return To_Orig;}
+ inline TUSE GetUse(void) {return Use;}
+ inline PCFIL GetCondFil(void) {return To_CondFil;}
+ inline LPCSTR GetName(void) {return Name;}
+ inline PTABLE GetTable(void) {return To_Table;}
+ inline PCOL GetColumns(void) {return Columns;}
+ inline int GetDegree(void) {return Degree;}
+ inline MODE GetMode(void) {return Mode;}
+ inline PFIL GetFilter(void) {return To_Filter;}
+ inline PCOL GetSetCols(void) {return To_SetCols;}
+ inline void SetSetCols(PCOL colp) {To_SetCols = colp;}
+ inline void SetFilter(PFIL fp) {To_Filter = fp;}
+ inline void SetOrig(PTDB txp) {To_Orig = txp;}
+ inline void SetUse(TUSE n) {Use = n;}
+ inline void SetCondFil(PCFIL cfp) {To_CondFil = cfp;}
+ inline void SetNext(PTDB tdbp) {Next = tdbp;}
+ inline void SetName(LPCSTR name) {Name = name;}
+ inline void SetTable(PTABLE tablep) {To_Table = tablep;}
+ inline void SetColumns(PCOL colp) {Columns = colp;}
+ inline void SetDegree(int degree) {Degree = degree;}
+ inline void SetMode(MODE mode) {Mode = mode;}
// Properties
- virtual AMT GetAmType(void) {return TYPE_AM_ERROR;}
- virtual int GetTdb_No(void) {return Tdb_No;}
- virtual PTDB GetNext(void) {return Next;}
- virtual PCATLG GetCat(void) {return NULL;}
- virtual void SetAbort(bool) {;}
+ virtual AMT GetAmType(void) {return TYPE_AM_ERROR;}
+ virtual bool IsRemote(void) {return false;}
+ virtual bool IsIndexed(void) {return false;}
+ virtual int GetTdb_No(void) {return Tdb_No;}
+ virtual PTDB GetNext(void) {return Next;}
+ virtual PCATLG GetCat(void) {return NULL;}
+ virtual void SetAbort(bool) {;}
+ virtual PKXBASE GetKindex(void) {return NULL;}
// Methods
virtual bool IsSame(PTDB tp) {return tp == this;}
- virtual bool IsSpecial(PSZ name) = 0;
- virtual bool GetBlockValues(PGLOBAL) {return false;}
+ virtual bool IsSpecial(PSZ name);
+ virtual bool IsReadOnly(void) {return Read_Only;}
+ virtual bool IsView(void) {return FALSE;}
+ virtual PSZ GetPath(void);
+ virtual RECFM GetFtype(void) {return RECFM_NAF;}
+ virtual bool GetBlockValues(PGLOBAL) { return false; }
virtual int Cardinality(PGLOBAL) {return 0;}
- virtual int GetMaxSize(PGLOBAL) = 0;
+ virtual int GetRecpos(void) = 0;
+ virtual bool SetRecpos(PGLOBAL g, int recpos);
+ virtual int GetMaxSize(PGLOBAL) = 0;
virtual int GetProgMax(PGLOBAL) = 0;
- virtual int GetProgCur(void) = 0;
- virtual int RowNumber(PGLOBAL g, bool b = false);
- virtual bool IsReadOnly(void) {return true;}
- virtual const CHARSET_INFO *data_charset() {return NULL;}
+ virtual int GetProgCur(void) {return GetRecpos();}
+ virtual PSZ GetFile(PGLOBAL) {return "Not a file";}
+ virtual void SetFile(PGLOBAL, PSZ) {}
+ virtual void ResetDB(void) {}
+ virtual void ResetSize(void) {MaxSize = -1;}
+ virtual int RowNumber(PGLOBAL g, bool b = false);
virtual PTDB Duplicate(PGLOBAL) {return NULL;}
- virtual PTDB CopyOne(PTABS) {return this;}
+ virtual PTDB Clone(PTABS) {return this;}
virtual PTDB Copy(PTABS t);
virtual void PrintAM(FILE *f, char *m)
{fprintf(f, "%s AM(%d)\n", m, GetAmType());}
@@ -112,10 +133,15 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
virtual void Print(PGLOBAL g, char *ps, uint z);
virtual PSZ GetServer(void) = 0;
virtual int GetBadLines(void) {return 0;}
+ virtual CHARSET_INFO *data_charset(void);
- // Database pure virtual routines
- virtual PCOL ColDB(PGLOBAL g, PSZ name, int num) = 0;
- virtual void MarkDB(PGLOBAL, PTDB) = 0;
+ // Database routines
+ virtual PCOL ColDB(PGLOBAL g, PSZ name, int num);
+ virtual PCOL MakeCol(PGLOBAL, PCOLDEF, PCOL, int)
+ {assert(false); return NULL;}
+ virtual PCOL InsertSpecialColumn(PCOL colp);
+ virtual PCOL InsertSpcBlk(PGLOBAL g, PCOLDEF cdp);
+ virtual void MarkDB(PGLOBAL g, PTDB tdb2);
virtual bool OpenDB(PGLOBAL) = 0;
virtual int ReadDB(PGLOBAL) = 0;
virtual int WriteDB(PGLOBAL) = 0;
@@ -126,20 +152,26 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block.
protected:
// Members
- PTDB To_Orig; // Pointer to original if it is a copy
- TUSE Use;
- PFIL To_Filter;
- PCFIL To_CondFil; // To condition filter structure
- static int Tnum; // Used to generate Tdb_no's
- const int Tdb_No; // GetTdb_No() is always 0 for OPJOIN
- PTDB Next; // Next in linearized queries
- PTABLE To_Table; // Points to the XTAB object
- LPCSTR Name; // Table name
- PCOL Columns; // Points to the first column of the table
- MODE Mode; // 10 Read, 30 Update, 40 Insert, 50 Delete
- int Degree; // Number of columns
- int Cardinal; // Table number of rows
- }; // end of class TDB
+ PTDB To_Orig; // Pointer to original if it is a copy
+ PTABDEF To_Def; // Points to catalog description block
+ TUSE Use;
+ PFIL To_Filter;
+ PCFIL To_CondFil; // To condition filter structure
+ static int Tnum; // Used to generate Tdb_no's
+ const int Tdb_No; // GetTdb_No() is always 0 for OPJOIN
+ PTDB Next; // Next in linearized queries
+ PTABLE To_Table; // Points to the XTAB object
+ LPCSTR Name; // Table name
+ PCOL Columns; // Points to the first column of the table
+ PCOL To_SetCols; // Points to updated columns
+ MODE Mode; // 10 Read, 30 Update, 40 Insert, 50 Delete
+ int Degree; // Number of columns
+ int Cardinal; // Table number of rows
+ int MaxSize; // Max size in number of lines
+ bool Read_Only; // True for read only tables
+ const CHARSET_INFO *m_data_charset;
+ const char *csname; // Table charset name
+}; // end of class TDB
/***********************************************************************/
/* This is the base class for all query tables (except decode). */
@@ -155,50 +187,50 @@ class DllExport TDBASE : public TDB {
// Implementation
inline int GetKnum(void) {return Knum;}
- inline PTABDEF GetDef(void) {return To_Def;}
- inline PKXBASE GetKindex(void) {return To_Kindex;}
- inline PCOL GetSetCols(void) {return To_SetCols;}
- inline void SetSetCols(PCOL colp) {To_SetCols = colp;}
+//inline PTABDEF GetDef(void) {return To_Def;}
+//inline PCOL GetSetCols(void) {return To_SetCols;}
+//inline void SetSetCols(PCOL colp) {To_SetCols = colp;}
inline void SetKey_Col(PCOL *cpp) {To_Key_Col = cpp;}
inline void SetXdp(PIXDEF xdp) {To_Xdp = xdp;}
inline void SetKindex(PKXBASE kxp) {To_Kindex = kxp;}
// Properties
- void ResetKindex(PGLOBAL g, PKXBASE kxp);
+ virtual PKXBASE GetKindex(void) {return To_Kindex;}
+ void ResetKindex(PGLOBAL g, PKXBASE kxp);
PCOL Key(int i) {return (To_Key_Col) ? To_Key_Col[i] : NULL;}
// Methods
virtual bool IsUsingTemp(PGLOBAL) {return false;}
- virtual bool IsIndexed(void) {return false;}
- virtual bool IsSpecial(PSZ name);
+//virtual bool IsIndexed(void) {return false;}
+//virtual bool IsSpecial(PSZ name);
virtual PCATLG GetCat(void);
- virtual PSZ GetPath(void);
+//virtual PSZ GetPath(void);
virtual void PrintAM(FILE *f, char *m);
- virtual RECFM GetFtype(void) {return RECFM_NAF;}
+//virtual RECFM GetFtype(void) {return RECFM_NAF;}
//virtual int GetAffectedRows(void) {return -1;}
- virtual int GetRecpos(void) = 0;
- virtual bool SetRecpos(PGLOBAL g, int recpos);
- virtual bool IsReadOnly(void) {return Read_Only;}
- virtual bool IsView(void) {return FALSE;}
- virtual CHARSET_INFO *data_charset(void);
+//virtual int GetRecpos(void) = 0;
+//virtual bool SetRecpos(PGLOBAL g, int recpos);
+//virtual bool IsReadOnly(void) {return Read_Only;}
+//virtual bool IsView(void) {return FALSE;}
+//virtual CHARSET_INFO *data_charset(void);
virtual int GetProgMax(PGLOBAL g) {return GetMaxSize(g);}
- virtual int GetProgCur(void) {return GetRecpos();}
- virtual PSZ GetFile(PGLOBAL) {return "Not a file";}
- virtual int GetRemote(void) {return 0;}
- virtual void SetFile(PGLOBAL, PSZ) {}
- virtual void ResetDB(void) {}
- virtual void ResetSize(void) {MaxSize = -1;}
+//virtual int GetProgCur(void) {return GetRecpos();}
+//virtual PSZ GetFile(PGLOBAL) {return "Not a file";}
+//virtual int GetRemote(void) {return 0;}
+//virtual void SetFile(PGLOBAL, PSZ) {}
+//virtual void ResetDB(void) {}
+//virtual void ResetSize(void) {MaxSize = -1;}
virtual void RestoreNrec(void) {}
virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox);
virtual PSZ GetServer(void) {return "Current";}
// Database routines
- virtual PCOL ColDB(PGLOBAL g, PSZ name, int num);
- virtual PCOL MakeCol(PGLOBAL, PCOLDEF, PCOL, int)
- {assert(false); return NULL;}
- virtual PCOL InsertSpecialColumn(PCOL colp);
- virtual PCOL InsertSpcBlk(PGLOBAL g, PCOLDEF cdp);
- virtual void MarkDB(PGLOBAL g, PTDB tdb2);
+//virtual PCOL ColDB(PGLOBAL g, PSZ name, int num);
+//virtual PCOL MakeCol(PGLOBAL, PCOLDEF, PCOL, int)
+// {assert(false); return NULL;}
+//virtual PCOL InsertSpecialColumn(PCOL colp);
+//virtual PCOL InsertSpcBlk(PGLOBAL g, PCOLDEF cdp);
+//virtual void MarkDB(PGLOBAL g, PTDB tdb2);
virtual int MakeIndex(PGLOBAL g, PIXDEF, bool)
{strcpy(g->Message, "Remote index"); return RC_INFO;}
virtual bool ReadKey(PGLOBAL, OPVAL, const key_range *)
@@ -209,19 +241,19 @@ class DllExport TDBASE : public TDB {
"This function should not be called for this table"); return true;}
// Members
- PTABDEF To_Def; // Points to catalog description block
+//PTABDEF To_Def; // Points to catalog description block
PXOB *To_Link; // Points to column of previous relations
PCOL *To_Key_Col; // Points to key columns in current file
PKXBASE To_Kindex; // Points to table key index
PIXDEF To_Xdp; // To the index definition block
- PCOL To_SetCols; // Points to updated columns
+//PCOL To_SetCols; // Points to updated columns
RECFM Ftype; // File type: 0-var 1-fixed 2-binary (VCT)
- int MaxSize; // Max size in number of lines
+//int MaxSize; // Max size in number of lines
int Knum; // Size of key arrays
- bool Read_Only; // True for read only tables
- const CHARSET_INFO *m_data_charset;
- const char *csname; // Table charset name
- }; // end of class TDBASE
+//bool Read_Only; // True for read only tables
+//const CHARSET_INFO *m_data_charset;
+//const char *csname; // Table charset name
+}; // end of class TDBASE
/***********************************************************************/
/* The abstract base class declaration for the catalog tables. */
@@ -243,7 +275,8 @@ class DllExport TDBCAT : public TDBASE {
// Database routines
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
- virtual int GetMaxSize(PGLOBAL g);
+ virtual int Cardinality(PGLOBAL) {return 10;} // To avoid assert
+ virtual int GetMaxSize(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
@@ -275,7 +308,7 @@ class DllExport CATCOL : public COLBLK {
virtual int GetAmType(void) {return TYPE_AM_ODBC;}
// Methods
- virtual void ReadColumn(PGLOBAL g);
+ virtual void ReadColumn(PGLOBAL g);
protected:
CATCOL(void) {} // Default constructor not to be used
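Much of the xtable.h change consists of moving methods that the subclasses implemented identically (GetProgCur(), ResetSize(), IsReadOnly(), data_charset(), ...) from TDBASE up into TDB, so former pure virtuals become base-class defaults. A minimal sketch of that idea with hypothetical names:
    // Sketch only: defaults supplied once by the base, overridden where needed.
    struct TableBase {
      virtual ~TableBase() {}
      virtual int  GetRecpos(void) = 0;                      // still table-specific
      virtual int  GetProgCur(void) {return GetRecpos();}    // default forwards
      virtual bool IsReadOnly(void) {return Read_Only;}
      virtual void ResetSize(void) {MaxSize = -1;}
     protected:
      int  MaxSize = -1;
      bool Read_Only = false;
    }; // end of TableBase
    struct ZipTable : public TableBase {
      int GetRecpos(void) override {return Pos;}
      int Pos = 0;
    }; // end of ZipTable
    int main() { ZipTable z; return z.GetProgCur(); }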
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 77b915bce92..07451f71f66 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -1946,8 +1946,6 @@ btr_cur_update_alloc_zip_func(
const page_t* page = page_cur_get_page(cursor);
ut_ad(page_zip == page_cur_get_page_zip(cursor));
-
- ut_ad(page_zip);
ut_ad(!dict_index_is_ibuf(index));
ut_ad(rec_offs_validate(page_cur_get_rec(cursor), index, offsets));
@@ -4371,7 +4369,6 @@ btr_cur_disown_inherited_fields(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!rec_offs_comp(offsets) || !rec_get_node_ptr_flag(rec));
ut_ad(rec_offs_any_extern(offsets));
- ut_ad(mtr);
for (i = 0; i < rec_offs_n_fields(offsets); i++) {
if (rec_offs_nth_extern(offsets, i)
@@ -4434,9 +4431,6 @@ btr_push_update_extern_fields(
ulint n;
const upd_field_t* uf;
- ut_ad(tuple);
- ut_ad(update);
-
uf = update->fields;
n = upd_get_n_fields(update);
@@ -4608,7 +4602,6 @@ btr_store_big_rec_extern_fields(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_any_extern(offsets));
- ut_ad(btr_mtr);
ut_ad(mtr_memo_contains(btr_mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(btr_mtr, rec_block, MTR_MEMO_PAGE_X_FIX));
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 94d285b64e7..2a11c15441a 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -6214,7 +6214,6 @@ dict_set_corrupted(
row_mysql_lock_data_dictionary(trx);
}
- ut_ad(index);
ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(!dict_table_is_comp(dict_sys->sys_tables));
ut_ad(!dict_table_is_comp(dict_sys->sys_indexes));
diff --git a/storage/innobase/dyn/dyn0dyn.cc b/storage/innobase/dyn/dyn0dyn.cc
index 3ef5297a7c9..dd1f6863c14 100644
--- a/storage/innobase/dyn/dyn0dyn.cc
+++ b/storage/innobase/dyn/dyn0dyn.cc
@@ -40,7 +40,6 @@ dyn_array_add_block(
mem_heap_t* heap;
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
if (arr->heap == NULL) {
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index 4aa30d8ebd2..c5289b48ccc 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -1071,8 +1071,6 @@ fsp_fill_free_list(
ulint i;
mtr_t ibuf_mtr;
- ut_ad(header != NULL);
- ut_ad(mtr != NULL);
ut_ad(page_offset(header) == FSP_HEADER_OFFSET);
/* Check if we can fill free list from above the free list limit */
@@ -1355,9 +1353,6 @@ fsp_alloc_free_page(
ulint page_no;
ulint space_size;
- ut_ad(mtr);
- ut_ad(init_mtr);
-
header = fsp_get_space_header(space, zip_size, mtr);
/* Get the hinted descriptor */
@@ -2370,7 +2365,6 @@ fseg_alloc_free_page_low(
ibool success;
ulint n;
- ut_ad(mtr);
ut_ad((direction >= FSP_UP) && (direction <= FSP_NO_DIR));
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index 800065ddaa1..dafc36f1cab 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -267,7 +267,6 @@ dict_index_is_clust(
/*================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_CLUSTERED);
@@ -281,7 +280,6 @@ dict_index_is_unique(
/*=================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_UNIQUE);
@@ -296,7 +294,6 @@ dict_index_is_ibuf(
/*===============*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_IBUF);
@@ -328,7 +325,6 @@ dict_index_is_sec_or_ibuf(
{
ulint type;
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
type = index->type;
@@ -346,7 +342,6 @@ dict_table_get_n_user_cols(
/*=======================*/
const dict_table_t* table) /*!< in: table */
{
- ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
return(table->n_cols - DATA_N_SYS_COLS);
@@ -378,7 +373,6 @@ dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
{
- ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
return(table->n_cols);
@@ -1546,7 +1540,6 @@ dict_index_is_corrupted(
/*====================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return((index->type & DICT_CORRUPT)
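The InnoDB hunks in this and the neighbouring files remove ut_ad() null-pointer assertions that are immediately followed by a dereference of the same pointer, so the extra check added no protection. A minimal sketch of the pattern, using the standard assert macro instead of InnoDB's ut_ad:
    #include <cassert>
    struct index_like { unsigned magic_n; unsigned type; };
    static bool is_clustered(const index_like *index) {
      // assert(index);               // redundant: the next line dereferences index
      assert(index->magic_n == 42u);  // a null pointer would already fault here
      return (index->type & 1u) != 0;
    } // end of is_clustered
    int main() { index_like i = {42u, 1u}; return is_clustered(&i) ? 0 : 1; }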
diff --git a/storage/innobase/include/dyn0dyn.ic b/storage/innobase/include/dyn0dyn.ic
index f18f2e6dff9..13003862638 100644
--- a/storage/innobase/include/dyn0dyn.ic
+++ b/storage/innobase/include/dyn0dyn.ic
@@ -47,8 +47,6 @@ dyn_block_get_used(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
{
- ut_ad(block);
-
return((block->used) & ~DYN_BLOCK_FULL_FLAG);
}
@@ -76,7 +74,6 @@ dyn_array_create(
dyn_array_t* arr) /*!< in/out: memory buffer of
size sizeof(dyn_array_t) */
{
- ut_ad(arr);
#if DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG
# error "DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG"
#endif
@@ -119,7 +116,6 @@ dyn_array_push(
dyn_block_t* block;
ulint used;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
ut_ad(size <= DYN_ARRAY_DATA_SIZE);
ut_ad(size);
@@ -159,7 +155,6 @@ dyn_array_open(
{
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
ut_ad(size <= DYN_ARRAY_DATA_SIZE);
ut_ad(size);
@@ -195,7 +190,6 @@ dyn_array_close(
{
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
block = dyn_array_get_last_block(arr);
@@ -222,7 +216,6 @@ dyn_array_get_element(
{
const dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
/* Get the first array block */
@@ -260,7 +253,6 @@ dyn_array_get_data_size(
const dyn_block_t* block;
ulint sum = 0;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
if (arr->heap == NULL) {
diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h
index b6c977bdc74..9ca1c46d72b 100644
--- a/storage/innobase/include/log0recv.h
+++ b/storage/innobase/include/log0recv.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -271,20 +272,12 @@ void
recv_sys_var_init(void);
/*===================*/
#endif /* !UNIV_HOTBACKUP */
-/*******************************************************************//**
-Empties the hash table of stored log records, applying them to appropriate
-pages. */
+/** Apply the hash table of stored log records to persistent data pages.
+@param[in] last_batch whether the change buffer merge will be
+ performed as part of the operation */
UNIV_INTERN
-dberr_t
-recv_apply_hashed_log_recs(
-/*=======================*/
- ibool allow_ibuf); /*!< in: if TRUE, also ibuf operations are
- allowed during the application; if FALSE,
- no ibuf operations are allowed, and after
- the application all file pages are flushed to
- disk and invalidated in buffer pool: this
- alternative means that no new log records
- can be generated during the application */
+void
+recv_apply_hashed_log_recs(bool last_batch);
#ifdef UNIV_HOTBACKUP
/*******************************************************************//**
Applies log records in the hash table to a backup. */
@@ -434,6 +427,8 @@ struct recv_sys_t{
scan find a corrupt log block, or a corrupt
log record, or there is a log parsing
buffer overflow */
+ /** the time when progress was last reported */
+ ib_time_t progress_time;
#ifdef UNIV_LOG_ARCHIVE
log_group_t* archive_group;
/*!< in archive recovery: the log group whose
@@ -446,6 +441,20 @@ struct recv_sys_t{
addresses in the hash table */
recv_dblwr_t dblwr;
+
+ /** Determine whether redo log recovery progress should be reported.
+ @param[in] time the current time
+ @return whether progress should be reported
+ (the last report was at least 15 seconds ago) */
+ bool report(ib_time_t time)
+ {
+ if (time - progress_time < 15) {
+ return false;
+ }
+
+ progress_time = time;
+ return true;
+ }
};
/** The recovery system */
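The report() member added above is what log0log.cc uses further down to rate-limit its ib_logf()/sd_notifyf() progress messages: it returns true at most once every 15 seconds and records the time of the last report in progress_time. A minimal stand-alone sketch of the same throttle:
    #include <cstdio>
    #include <ctime>
    struct progress_throttle {
      std::time_t last = 0;
      // Returns true at most once per 15-second window, as recv_sys_t::report does.
      bool report(std::time_t now) {
        if (now - last < 15)
          return false;
        last = now;
        return true;
      }
    }; // end of progress_throttle
    int main() {
      progress_throttle th;
      for (int i = 0; i < 3; i++)
        if (th.report(std::time(nullptr)))
          std::printf("progress reported at iteration %d\n", i);
      return 0;
    }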
diff --git a/storage/innobase/include/mach0data.ic b/storage/innobase/include/mach0data.ic
index 881b2b6055f..215bb12cbe7 100644
--- a/storage/innobase/include/mach0data.ic
+++ b/storage/innobase/include/mach0data.ic
@@ -53,7 +53,6 @@ mach_read_from_1(
/*=============*/
const byte* b) /*!< in: pointer to byte */
{
- ut_ad(b);
return((ulint)(b[0]));
}
@@ -148,7 +147,6 @@ mach_read_from_3(
/*=============*/
const byte* b) /*!< in: pointer to 3 bytes */
{
- ut_ad(b);
return( ((ulint)(b[0]) << 16)
| ((ulint)(b[1]) << 8)
| (ulint)(b[2])
@@ -185,7 +183,6 @@ mach_read_from_4(
/*=============*/
const byte* b) /*!< in: pointer to four bytes */
{
- ut_ad(b);
return( ((ulint)(b[0]) << 24)
| ((ulint)(b[1]) << 16)
| ((ulint)(b[2]) << 8)
@@ -264,8 +261,6 @@ mach_read_compressed(
{
ulint flag;
- ut_ad(b);
-
flag = mach_read_from_1(b);
if (flag < 0x80UL) {
@@ -346,8 +341,6 @@ mach_read_from_7(
/*=============*/
const byte* b) /*!< in: pointer to 7 bytes */
{
- ut_ad(b);
-
return(ut_ull_create(mach_read_from_3(b), mach_read_from_4(b + 3)));
}
@@ -377,8 +370,6 @@ mach_read_from_6(
/*=============*/
const byte* b) /*!< in: pointer to 6 bytes */
{
- ut_ad(b);
-
return(ut_ull_create(mach_read_from_2(b), mach_read_from_4(b + 2)));
}
@@ -426,8 +417,6 @@ mach_ull_read_compressed(
ib_uint64_t n;
ulint size;
- ut_ad(b);
-
n = (ib_uint64_t) mach_read_compressed(b);
size = mach_get_compressed_size((ulint) n);
@@ -493,8 +482,6 @@ mach_ull_read_much_compressed(
ib_uint64_t n;
ulint size;
- ut_ad(b);
-
if (*b != (byte)0xFF) {
n = 0;
size = 0;
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index d7f1db82858..c5775188bcf 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -161,7 +161,6 @@ page_header_get_offs(
{
ulint offs;
- ut_ad(page);
ut_ad((field == PAGE_FREE)
|| (field == PAGE_LAST_INSERT)
|| (field == PAGE_HEAP_TOP));
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 624ee0bb35d..704e88ff646 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Google Inc.
-Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2014, 2017, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -38,6 +38,10 @@ Created 12/9/1995 Heikki Tuuri
#endif
#ifndef UNIV_HOTBACKUP
+#if MYSQL_VERSION_ID < 100200
+# include <my_systemd.h> /* sd_notifyf() */
+#endif
+
#include "mem0mem.h"
#include "buf0buf.h"
#include "buf0flu.h"
@@ -1756,7 +1760,7 @@ log_preflush_pool_modified_pages(
and we could not make a new checkpoint on the basis of the
info on the buffer pool only. */
- recv_apply_hashed_log_recs(TRUE);
+ recv_apply_hashed_log_recs(true);
}
success = buf_flush_list(ULINT_MAX, new_oldest, &n_pages);
@@ -2099,7 +2103,7 @@ log_checkpoint(
ut_ad(!srv_read_only_mode);
if (recv_recovery_is_on()) {
- recv_apply_hashed_log_recs(TRUE);
+ recv_apply_hashed_log_recs(true);
}
if (srv_unix_file_flush_method != SRV_UNIX_NOSYNC) {
@@ -2374,6 +2378,13 @@ loop:
start_lsn += len;
buf += len;
+ if (recv_sys->report(ut_time())) {
+ ib_logf(IB_LOG_LEVEL_INFO, "Read redo log up to LSN=" LSN_PF,
+ start_lsn);
+ sd_notifyf(0, "STATUS=Read redo log up to LSN=" LSN_PF,
+ start_lsn);
+ }
+
if (start_lsn != end_lsn) {
goto loop;
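sd_notifyf(), used above to mirror the log message to systemd, is the printf-style variant of sd_notify() from libsystemd's sd-daemon API; it silently does nothing when the process was not started by the service manager. A minimal usage sketch under that assumption (report_scan_progress and the LSN formatting are illustrative):

#include <systemd/sd-daemon.h>   // sd_notifyf(); link with -lsystemd
#include <cstdint>

static void report_scan_progress(std::uint64_t lsn) {
    // First argument 0: keep NOTIFY_SOCKET in the environment so that
    // later notifications still reach the service manager.
    sd_notifyf(0, "STATUS=Read redo log up to LSN=%llu",
               (unsigned long long) lsn);
}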
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index 104b5b6b421..d088ae9f3df 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -2,7 +2,7 @@
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -78,7 +78,7 @@ this must be less than UNIV_PAGE_SIZE as it is stored in the buffer pool */
#define RECV_READ_AHEAD_AREA 32
/** The recovery system */
-UNIV_INTERN recv_sys_t* recv_sys = NULL;
+UNIV_INTERN recv_sys_t* recv_sys;
/** TRUE when applying redo log records during crash recovery; FALSE
otherwise. Note that this is FALSE while a background thread is
rolling back incomplete transactions. */
@@ -134,9 +134,6 @@ UNIV_INTERN ibool recv_is_making_a_backup = FALSE;
UNIV_INTERN ibool recv_is_from_backup = FALSE;
# define buf_pool_get_curr_size() (5 * 1024 * 1024)
#endif /* !UNIV_HOTBACKUP */
-/** The following counter is used to decide when to print info on
-log scan */
-static ulint recv_scan_print_counter;
/** The type of the previous parsed redo log record */
static ulint recv_previous_parsed_rec_type;
@@ -311,8 +308,6 @@ recv_sys_var_init(void)
recv_no_ibuf_operations = FALSE;
- recv_scan_print_counter = 0;
-
recv_previous_parsed_rec_type = 999999;
recv_previous_parsed_rec_offset = 0;
@@ -426,6 +421,7 @@ recv_sys_init(
recv_sys->last_block_buf_start, OS_FILE_LOG_BLOCK_SIZE));
recv_sys->found_corrupt_log = FALSE;
+ recv_sys->progress_time = ut_time();
recv_max_page_lsn = 0;
@@ -435,33 +431,18 @@ recv_sys_init(
mutex_exit(&(recv_sys->mutex));
}
-/********************************************************//**
-Empties the hash table when it has been fully processed.
-@return DB_SUCCESS when successfull or DB_ERROR when fails. */
+/** Empty a fully processed hash table. */
static
-dberr_t
-recv_sys_empty_hash(void)
-/*=====================*/
+void
+recv_sys_empty_hash()
{
ut_ad(mutex_own(&(recv_sys->mutex)));
-
- if (recv_sys->n_addrs != 0) {
- fprintf(stderr,
- "InnoDB: Error: %lu pages with log records"
- " were left unprocessed!\n"
- "InnoDB: Maximum page number with"
- " log records on it %lu\n",
- (ulong) recv_sys->n_addrs,
- (ulong) recv_max_parsed_page_no);
- return DB_ERROR;
- }
+ ut_a(recv_sys->n_addrs == 0);
hash_table_free(recv_sys->addr_hash);
mem_heap_empty(recv_sys->heap);
recv_sys->addr_hash = hash_create(buf_pool_get_curr_size() / 512);
-
- return DB_SUCCESS;
}
#ifndef UNIV_HOTBACKUP
@@ -1716,7 +1697,9 @@ recv_recover_page_func(
mtr_commit(&mtr);
- mutex_enter(&(recv_sys->mutex));
+ ib_time_t time = ut_time();
+
+ mutex_enter(&recv_sys->mutex);
if (recv_max_page_lsn < page_lsn) {
recv_max_page_lsn = page_lsn;
@@ -1724,11 +1707,17 @@ recv_recover_page_func(
recv_addr->state = RECV_PROCESSED;
- ut_a(recv_sys->n_addrs);
- recv_sys->n_addrs--;
-
- mutex_exit(&(recv_sys->mutex));
+ ut_a(recv_sys->n_addrs > 0);
+ if (ulint n = --recv_sys->n_addrs) {
+ if (recv_sys->report(time)) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "To recover: " ULINTPF " pages from log", n);
+ sd_notifyf(0, "STATUS=To recover: " ULINTPF
+ " pages from log", n);
+ }
+ }
+ mutex_exit(&recv_sys->mutex);
}
#ifndef UNIV_HOTBACKUP
@@ -1774,62 +1763,50 @@ recv_read_in_area(
}
buf_read_recv_pages(FALSE, space, zip_size, page_nos, n);
- /*
- fprintf(stderr, "Recv pages at %lu n %lu\n", page_nos[0], n);
- */
return(n);
}
-/*******************************************************************//**
-Empties the hash table of stored log records, applying them to appropriate
-pages.
-@return DB_SUCCESS when successfull or DB_ERROR when fails. */
+/** Apply the hash table of stored log records to persistent data pages.
+@param[in] last_batch whether the change buffer merge will be
+ performed as part of the operation */
UNIV_INTERN
-dberr_t
-recv_apply_hashed_log_recs(
-/*=======================*/
- ibool allow_ibuf) /*!< in: if TRUE, also ibuf operations are
- allowed during the application; if FALSE,
- no ibuf operations are allowed, and after
- the application all file pages are flushed to
- disk and invalidated in buffer pool: this
- alternative means that no new log records
- can be generated during the application;
- the caller must in this case own the log
- mutex */
+void
+recv_apply_hashed_log_recs(bool last_batch)
{
- recv_addr_t* recv_addr;
- ulint i;
- ibool has_printed = FALSE;
- ulong progress;
- mtr_t mtr;
- dberr_t err = DB_SUCCESS;
-loop:
- mutex_enter(&(recv_sys->mutex));
-
- if (recv_sys->apply_batch_on) {
+ for (;;) {
+ mutex_enter(&recv_sys->mutex);
- mutex_exit(&(recv_sys->mutex));
+ if (!recv_sys->apply_batch_on) {
+ break;
+ }
+ mutex_exit(&recv_sys->mutex);
os_thread_sleep(500000);
-
- goto loop;
}
- ut_ad((!allow_ibuf) == mutex_own(&log_sys->mutex));
+ ut_ad(!last_batch == mutex_own(&log_sys->mutex));
- if (!allow_ibuf) {
+ if (!last_batch) {
recv_no_ibuf_operations = TRUE;
}
+ if (ulint n = recv_sys->n_addrs) {
+ const char* msg = last_batch
+ ? "Starting final batch to recover "
+ : "Starting a batch to recover ";
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "%s" ULINTPF " pages from redo log", msg, n);
+ sd_notifyf(0, "STATUS=%s" ULINTPF " pages from redo log",
+ msg, n);
+ }
+
recv_sys->apply_log_recs = TRUE;
recv_sys->apply_batch_on = TRUE;
- for (i = 0; i < hash_get_n_cells(recv_sys->addr_hash); i++) {
-
- for (recv_addr = static_cast<recv_addr_t*>(
- HASH_GET_FIRST(recv_sys->addr_hash, i));
- recv_addr != 0;
+ for (ulint i = 0; i < hash_get_n_cells(recv_sys->addr_hash); i++) {
+ for (recv_addr_t* recv_addr = static_cast<recv_addr_t*>(
+ HASH_GET_FIRST(recv_sys->addr_hash, i));
+ recv_addr;
recv_addr = static_cast<recv_addr_t*>(
HASH_GET_NEXT(addr_hash, recv_addr))) {
@@ -1838,24 +1815,12 @@ loop:
ulint page_no = recv_addr->page_no;
if (recv_addr->state == RECV_NOT_PROCESSED) {
- if (!has_printed) {
- ib_logf(IB_LOG_LEVEL_INFO,
- "Starting an apply batch"
- " of log records"
- " to the database...");
- fputs("InnoDB: Progress in percent: ",
- stderr);
- has_printed = TRUE;
- }
-
- mutex_exit(&(recv_sys->mutex));
+ mutex_exit(&recv_sys->mutex);
if (buf_page_peek(space, page_no)) {
- buf_block_t* block;
-
+ mtr_t mtr;
mtr_start(&mtr);
-
- block = buf_page_get(
+ buf_block_t* block = buf_page_get(
space, zip_size, page_no,
RW_X_LATCH, &mtr);
buf_block_dbg_add_level(
@@ -1868,21 +1833,9 @@ loop:
page_no);
}
- mutex_enter(&(recv_sys->mutex));
+ mutex_enter(&recv_sys->mutex);
}
}
-
- progress = (ulong) (i * 100)
- / hash_get_n_cells(recv_sys->addr_hash);
- if (has_printed
- && progress
- != ((i + 1) * 100)
- / hash_get_n_cells(recv_sys->addr_hash)) {
-
- fprintf(stderr, "%lu ", progress);
- sd_notifyf(0, "STATUS=Applying batch of log records for"
- " InnoDB: Progress %lu", progress);
- }
}
/* Wait until all the pages have been processed */
@@ -1896,12 +1849,7 @@ loop:
mutex_enter(&(recv_sys->mutex));
}
- if (has_printed) {
-
- fprintf(stderr, "\n");
- }
-
- if (!allow_ibuf) {
+ if (!last_batch) {
bool success;
/* Flush all the file pages to disk and invalidate them in
@@ -1939,16 +1887,9 @@ loop:
recv_sys->apply_log_recs = FALSE;
recv_sys->apply_batch_on = FALSE;
- err = recv_sys_empty_hash();
-
- if (has_printed) {
- fprintf(stderr, "InnoDB: Apply batch completed\n");
- sd_notify(0, "STATUS=InnoDB: Apply batch completed");
- }
-
- mutex_exit(&(recv_sys->mutex));
+ recv_sys_empty_hash();
- return err;
+ mutex_exit(&recv_sys->mutex);
}
#else /* !UNIV_HOTBACKUP */
/*******************************************************************//**
@@ -1971,11 +1912,6 @@ recv_apply_log_recs_for_backup(void)
block = back_block1;
- ib_logf(IB_LOG_LEVEL_INFO,
- "Starting an apply batch of log records to the database...");
-
- fputs("InnoDB: Progress in percent: ", stderr);
-
n_hash_cells = hash_get_n_cells(recv_sys->addr_hash);
for (i = 0; i < n_hash_cells; i++) {
@@ -2087,16 +2023,6 @@ recv_apply_log_recs_for_backup(void)
skip_this_recv_addr:
recv_addr = HASH_GET_NEXT(addr_hash, recv_addr);
}
-
- if ((100 * i) / n_hash_cells
- != (100 * (i + 1)) / n_hash_cells) {
- fprintf(stderr, "%lu ",
- (ulong) ((100 * i) / n_hash_cells));
- fflush(stderr);
- sd_notifyf(0, "STATUS=Applying batch of log records for"
- " backup InnoDB: Progress %lu",
- (ulong) (100 * i) / n_hash_cells);
- }
}
sd_notify(0, "STATUS=InnoDB: Apply batch for backup completed");
@@ -2797,11 +2723,10 @@ recv_scan_log_recs(
#ifndef UNIV_HOTBACKUP
if (recv_log_scan_is_startup_type
&& !recv_needed_recovery) {
-
if (!srv_read_only_mode) {
ib_logf(IB_LOG_LEVEL_INFO,
- "Log scan progressed past the "
- "checkpoint lsn " LSN_PF "",
+ "Starting crash recovery from "
+ "checkpoint LSN=" LSN_PF,
recv_sys->scanned_lsn);
recv_init_crash_recovery();
@@ -2861,19 +2786,6 @@ recv_scan_log_recs(
*group_scanned_lsn = scanned_lsn;
- if (recv_needed_recovery
- || (recv_is_from_backup && !recv_is_making_a_backup)) {
- recv_scan_print_counter++;
-
- if (finished || (recv_scan_print_counter % 80 == 0)) {
-
- fprintf(stderr,
- "InnoDB: Doing recovery: scanned up to"
- " log sequence number " LSN_PF "\n",
- *group_scanned_lsn);
- }
- }
-
if (more_data && !recv_sys->found_corrupt_log) {
/* Try to parse more log records */
@@ -2893,12 +2805,7 @@ recv_scan_log_recs(
log yet: they would be produced by ibuf
operations */
- *err = recv_apply_hashed_log_recs(FALSE);
-
- if (*err != DB_SUCCESS) {
- /* Finish processing because of error */
- return (TRUE);
- }
+ recv_apply_hashed_log_recs(false);
}
#endif /* !UNIV_HOTBACKUP */
@@ -2982,11 +2889,6 @@ recv_init_crash_recovery(void)
recv_needed_recovery = TRUE;
- ib_logf(IB_LOG_LEVEL_INFO, "Database was not shutdown normally!");
- ib_logf(IB_LOG_LEVEL_INFO, "Starting crash recovery.");
- ib_logf(IB_LOG_LEVEL_INFO,
- "Reading tablespace information from the .ibd files...");
-
fil_load_single_table_tablespaces();
/* If we are using the doublewrite method, we will
@@ -2997,9 +2899,7 @@ recv_init_crash_recovery(void)
if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
ib_logf(IB_LOG_LEVEL_INFO,
- "Restoring possible half-written data pages ");
-
- ib_logf(IB_LOG_LEVEL_INFO,
+ "Restoring possible half-written data pages "
"from the doublewrite buffer...");
buf_dblwr_process();
@@ -3984,7 +3884,7 @@ recv_recovery_from_archive_start(
if (limit_lsn != LSN_MAX) {
- recv_apply_hashed_log_recs(FALSE);
+ recv_apply_hashed_log_recs(false);
recv_reset_logs(0, FALSE, recv_sys->recovered_lsn);
}
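The rewritten entry of recv_apply_hashed_log_recs() replaces the goto-based wait with a for (;;) loop that polls apply_batch_on under recv_sys->mutex, sleeping between attempts, and exits with the mutex still held. The same wait shape in portable C++ (std::mutex instead of the InnoDB mutex; names are illustrative):

#include <chrono>
#include <mutex>
#include <thread>

std::mutex sys_mutex;
bool apply_batch_on = false;     // protected by sys_mutex

// Returns with sys_mutex locked and apply_batch_on == false.
void wait_for_previous_batch() {
    for (;;) {
        sys_mutex.lock();
        if (!apply_batch_on)
            break;                                   // keep the lock
        sys_mutex.unlock();
        std::this_thread::sleep_for(std::chrono::milliseconds(500));
    }
}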
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index 5843dd80524..e40aa43193a 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -309,7 +309,6 @@ mtr_commit(
/*=======*/
mtr_t* mtr) /*!< in: mini-transaction */
{
- ut_ad(mtr);
ut_ad(mtr->magic_n == MTR_MAGIC_N);
ut_ad(mtr->state == MTR_ACTIVE);
ut_ad(!mtr->inside_ibuf);
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index a09f270a54f..41f56fd4559 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -1450,7 +1450,6 @@ page_dir_split_slot(
ulint i;
ulint n_owned;
- ut_ad(page);
ut_ad(!page_zip || page_is_comp(page));
ut_ad(slot_no > 0);
@@ -1512,7 +1511,6 @@ page_dir_balance_slot(
rec_t* old_rec;
rec_t* new_rec;
- ut_ad(page);
ut_ad(!page_zip || page_is_comp(page));
ut_ad(slot_no > 0);
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index 2bf1f324784..ee430435da2 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -4807,8 +4807,6 @@ page_zip_parse_compress(
ulint size;
ulint trailer_size;
- ut_ad(ptr != NULL);
- ut_ad(end_ptr != NULL);
ut_ad(!page == !page_zip);
if (UNIV_UNLIKELY(ptr + (2 + 2) > end_ptr)) {
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 926e8a44152..047f2685d7a 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1065,14 +1065,8 @@ row_merge_read_rec(
ulint data_size;
ulint avail_size;
- ut_ad(block);
- ut_ad(buf);
ut_ad(b >= &block[0]);
ut_ad(b < &block[srv_sort_buf_size]);
- ut_ad(index);
- ut_ad(foffs);
- ut_ad(mrec);
- ut_ad(offsets);
ut_ad(*offsets == 1 + REC_OFFS_HEADER_SIZE
+ dict_index_get_n_fields(index));
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 54bf50cba3d..13b14a0d174 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -1282,8 +1282,6 @@ row_upd_index_replace_new_col_vals_index_pos(
ulint n_fields;
const ulint zip_size = dict_table_zip_size(index->table);
- ut_ad(index);
-
dtuple_set_info_bits(entry, update->info_bits);
if (order_only) {
@@ -1468,8 +1466,6 @@ row_upd_changes_ord_field_binary_func(
ulint i;
const dict_index_t* clust_index;
- ut_ad(index);
- ut_ad(update);
ut_ad(thr);
ut_ad(thr->graph);
ut_ad(thr->graph->trx);
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 79cd3aebdd0..58117859de8 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -3,7 +3,7 @@
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2017, MariaDB Corporation
+Copyright (c) 2013, 2017, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -2483,7 +2483,7 @@ files_checked:
return(err);
}
- /* This must precede recv_apply_hashed_log_recs(TRUE). */
+ /* This must precede recv_apply_hashed_log_recs(true). */
ib_bh = trx_sys_init_at_db_start();
if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
@@ -2491,12 +2491,8 @@ files_checked:
respective file pages, for the last batch of
recv_group_scan_log_recs(). */
- err = recv_apply_hashed_log_recs(TRUE);
+ recv_apply_hashed_log_recs(true);
DBUG_PRINT("ib_log", ("apply completed"));
-
- if (err != DB_SUCCESS) {
- return(err);
- }
}
if (!srv_read_only_mode) {
diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index 628925fcc9b..1109d86c146 100644
--- a/storage/innobase/sync/sync0sync.cc
+++ b/storage/innobase/sync/sync0sync.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index c1a99370d80..29edcea74d7 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -1,6 +1,6 @@
/* Copyright (C) 2004-2008 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
Copyright (C) 2008-2009 Sun Microsystems, Inc.
- Copyright (c) 2009, 2014, SkySQL Ab.
+ Copyright (c) 2009, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1282,75 +1282,75 @@ int ha_maria::write_row(uchar * buf)
int ha_maria::check(THD * thd, HA_CHECK_OPT * check_opt)
{
int error;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
MARIA_SHARE *share= file->s;
const char *old_proc_info;
TRN *old_trn= file->trn;
- if (!file || !&param) return HA_ADMIN_INTERNAL_ERROR;
+ if (!file || !param) return HA_ADMIN_INTERNAL_ERROR;
- maria_chk_init(&param);
- param.thd= thd;
- param.op_name= "check";
- param.db_name= table->s->db.str;
- param.table_name= table->alias.c_ptr();
- param.testflag= check_opt->flags | T_CHECK | T_SILENT;
- param.stats_method= (enum_handler_stats_method)THDVAR(thd,stats_method);
+ maria_chk_init(param);
+ param->thd= thd;
+ param->op_name= "check";
+ param->db_name= table->s->db.str;
+ param->table_name= table->alias.c_ptr();
+ param->testflag= check_opt->flags | T_CHECK | T_SILENT;
+ param->stats_method= (enum_handler_stats_method)THDVAR(thd,stats_method);
if (!(table->db_stat & HA_READ_ONLY))
- param.testflag |= T_STATISTICS;
- param.using_global_keycache= 1;
+ param->testflag |= T_STATISTICS;
+ param->using_global_keycache= 1;
if (!maria_is_crashed(file) &&
- (((param.testflag & T_CHECK_ONLY_CHANGED) &&
+ (((param->testflag & T_CHECK_ONLY_CHANGED) &&
!(share->state.changed & (STATE_CHANGED | STATE_CRASHED_FLAGS |
STATE_IN_REPAIR)) &&
share->state.open_count == 0) ||
- ((param.testflag & T_FAST) && (share->state.open_count ==
+ ((param->testflag & T_FAST) && (share->state.open_count ==
(uint) (share->global_changed ? 1 :
0)))))
return HA_ADMIN_ALREADY_DONE;
- maria_chk_init_for_check(&param, file);
+ maria_chk_init_for_check(param, file);
if ((file->s->state.changed & (STATE_CRASHED_FLAGS | STATE_MOVED)) ==
STATE_MOVED)
{
- _ma_check_print_error(&param, "%s", zerofill_error_msg);
+ _ma_check_print_error(param, "%s", zerofill_error_msg);
return HA_ADMIN_CORRUPT;
}
old_proc_info= thd_proc_info(thd, "Checking status");
thd_progress_init(thd, 3);
- error= maria_chk_status(&param, file); // Not fatal
- if (maria_chk_size(&param, file))
+ error= maria_chk_status(param, file); // Not fatal
+ if (maria_chk_size(param, file))
error= 1;
if (!error)
- error|= maria_chk_del(&param, file, param.testflag);
+ error|= maria_chk_del(param, file, param->testflag);
thd_proc_info(thd, "Checking keys");
thd_progress_next_stage(thd);
if (!error)
- error= maria_chk_key(&param, file);
+ error= maria_chk_key(param, file);
thd_proc_info(thd, "Checking data");
thd_progress_next_stage(thd);
if (!error)
{
- if ((!(param.testflag & T_QUICK) &&
+ if ((!(param->testflag & T_QUICK) &&
((share->options &
(HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) ||
- (param.testflag & (T_EXTEND | T_MEDIUM)))) || maria_is_crashed(file))
+ (param->testflag & (T_EXTEND | T_MEDIUM)))) || maria_is_crashed(file))
{
- ulonglong old_testflag= param.testflag;
- param.testflag |= T_MEDIUM;
- if (!(error= init_io_cache(&param.read_cache, file->dfile.file,
+ ulonglong old_testflag= param->testflag;
+ param->testflag |= T_MEDIUM;
+ if (!(error= init_io_cache(&param->read_cache, file->dfile.file,
my_default_record_cache_size, READ_CACHE,
share->pack.header_length, 1, MYF(MY_WME))))
{
- error= maria_chk_data_link(&param, file,
- MY_TEST(param.testflag & T_EXTEND));
- end_io_cache(&(param.read_cache));
+ error= maria_chk_data_link(param, file,
+ MY_TEST(param->testflag & T_EXTEND));
+ end_io_cache(&param->read_cache);
}
- param.testflag= old_testflag;
+ param->testflag= old_testflag;
}
}
if (!error)
@@ -1358,7 +1358,7 @@ int ha_maria::check(THD * thd, HA_CHECK_OPT * check_opt)
if ((share->state.changed & (STATE_CHANGED |
STATE_CRASHED_FLAGS |
STATE_IN_REPAIR | STATE_NOT_ANALYZED)) ||
- (param.testflag & T_STATISTICS) || maria_is_crashed(file))
+ (param->testflag & T_STATISTICS) || maria_is_crashed(file))
{
file->update |= HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
mysql_mutex_lock(&share->intern_lock);
@@ -1366,7 +1366,7 @@ int ha_maria::check(THD * thd, HA_CHECK_OPT * check_opt)
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED_FLAGS |
STATE_IN_REPAIR);
if (!(table->db_stat & HA_READ_ONLY))
- error= maria_update_state_info(&param, file,
+ error= maria_update_state_info(param, file,
UPDATE_TIME | UPDATE_OPEN_COUNT |
UPDATE_STAT);
mysql_mutex_unlock(&share->intern_lock);
@@ -1397,33 +1397,33 @@ int ha_maria::check(THD * thd, HA_CHECK_OPT * check_opt)
int ha_maria::analyze(THD *thd, HA_CHECK_OPT * check_opt)
{
int error= 0;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
MARIA_SHARE *share= file->s;
const char *old_proc_info;
- if (!&param)
+ if (!param)
return HA_ADMIN_INTERNAL_ERROR;
- maria_chk_init(&param);
- param.thd= thd;
- param.op_name= "analyze";
- param.db_name= table->s->db.str;
- param.table_name= table->alias.c_ptr();
- param.testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
+ maria_chk_init(param);
+ param->thd= thd;
+ param->op_name= "analyze";
+ param->db_name= table->s->db.str;
+ param->table_name= table->alias.c_ptr();
+ param->testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
T_DONT_CHECK_CHECKSUM);
- param.using_global_keycache= 1;
- param.stats_method= (enum_handler_stats_method)THDVAR(thd,stats_method);
+ param->using_global_keycache= 1;
+ param->stats_method= (enum_handler_stats_method)THDVAR(thd,stats_method);
if (!(share->state.changed & STATE_NOT_ANALYZED))
return HA_ADMIN_ALREADY_DONE;
old_proc_info= thd_proc_info(thd, "Scanning");
thd_progress_init(thd, 1);
- error= maria_chk_key(&param, file);
+ error= maria_chk_key(param, file);
if (!error)
{
mysql_mutex_lock(&share->intern_lock);
- error= maria_update_state_info(&param, file, UPDATE_STAT);
+ error= maria_update_state_info(param, file, UPDATE_STAT);
mysql_mutex_unlock(&share->intern_lock);
}
else if (!maria_is_crashed(file) && !thd->killed)
@@ -1436,46 +1436,46 @@ int ha_maria::analyze(THD *thd, HA_CHECK_OPT * check_opt)
int ha_maria::repair(THD * thd, HA_CHECK_OPT *check_opt)
{
int error;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
ha_rows start_records;
const char *old_proc_info;
- if (!file || !&param)
+ if (!file || !param)
return HA_ADMIN_INTERNAL_ERROR;
- maria_chk_init(&param);
- param.thd= thd;
- param.op_name= "repair";
- param.testflag= ((check_opt->flags & ~(T_EXTEND)) |
+ maria_chk_init(param);
+ param->thd= thd;
+ param->op_name= "repair";
+ param->testflag= ((check_opt->flags & ~(T_EXTEND)) |
T_SILENT | T_FORCE_CREATE | T_CALC_CHECKSUM |
(check_opt->flags & T_EXTEND ? T_REP : T_REP_BY_SORT));
- param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
- param.backup_time= check_opt->start_time;
+ param->sort_buffer_length= THDVAR(thd, sort_buffer_size);
+ param->backup_time= check_opt->start_time;
start_records= file->state->records;
old_proc_info= thd_proc_info(thd, "Checking table");
thd_progress_init(thd, 1);
- while ((error= repair(thd, &param, 0)) && param.retry_repair)
+ while ((error= repair(thd, param, 0)) && param->retry_repair)
{
- param.retry_repair= 0;
- if (test_all_bits(param.testflag,
+ param->retry_repair= 0;
+ if (test_all_bits(param->testflag,
(uint) (T_RETRY_WITHOUT_QUICK | T_QUICK)))
{
- param.testflag&= ~(T_RETRY_WITHOUT_QUICK | T_QUICK);
+ param->testflag&= ~(T_RETRY_WITHOUT_QUICK | T_QUICK);
/* Ensure we don't lose any rows when retrying without quick */
- param.testflag|= T_SAFE_REPAIR;
+ param->testflag|= T_SAFE_REPAIR;
if (thd->vio_ok())
- _ma_check_print_info(&param, "Retrying repair without quick");
+ _ma_check_print_info(param, "Retrying repair without quick");
else
sql_print_information("Retrying repair of: '%s' without quick",
table->s->path.str);
continue;
}
- param.testflag &= ~T_QUICK;
- if ((param.testflag & T_REP_BY_SORT))
+ param->testflag &= ~T_QUICK;
+ if (param->testflag & T_REP_BY_SORT)
{
- param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP;
+ param->testflag= (param->testflag & ~T_REP_BY_SORT) | T_REP;
if (thd->vio_ok())
- _ma_check_print_info(&param, "Retrying repair with keycache");
+ _ma_check_print_info(param, "Retrying repair with keycache");
sql_print_information("Retrying repair of: '%s' with keycache",
table->s->path.str);
continue;
@@ -1499,20 +1499,20 @@ int ha_maria::repair(THD * thd, HA_CHECK_OPT *check_opt)
int ha_maria::zerofill(THD * thd, HA_CHECK_OPT *check_opt)
{
int error;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
TRN *old_trn;
MARIA_SHARE *share= file->s;
- if (!file || !&param)
+ if (!file || !param)
return HA_ADMIN_INTERNAL_ERROR;
old_trn= file->trn;
- maria_chk_init(&param);
- param.thd= thd;
- param.op_name= "zerofill";
- param.testflag= check_opt->flags | T_SILENT | T_ZEROFILL;
- param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
- error=maria_zerofill(&param, file, share->open_file_name.str);
+ maria_chk_init(param);
+ param->thd= thd;
+ param->op_name= "zerofill";
+ param->testflag= check_opt->flags | T_SILENT | T_ZEROFILL;
+ param->sort_buffer_length= THDVAR(thd, sort_buffer_size);
+ error=maria_zerofill(param, file, share->open_file_name.str);
/* Reset trn, that may have been set by repair */
_ma_set_trn_for_table(file, old_trn);
@@ -1522,7 +1522,7 @@ int ha_maria::zerofill(THD * thd, HA_CHECK_OPT *check_opt)
TrID create_trid= trnman_get_min_safe_trid();
mysql_mutex_lock(&share->intern_lock);
share->state.changed|= STATE_NOT_MOVABLE;
- maria_update_state_info(&param, file, UPDATE_TIME | UPDATE_OPEN_COUNT);
+ maria_update_state_info(param, file, UPDATE_TIME | UPDATE_OPEN_COUNT);
_ma_update_state_lsns_sub(share, LSN_IMPOSSIBLE, create_trid,
TRUE, TRUE);
mysql_mutex_unlock(&share->intern_lock);
@@ -1533,24 +1533,24 @@ int ha_maria::zerofill(THD * thd, HA_CHECK_OPT *check_opt)
int ha_maria::optimize(THD * thd, HA_CHECK_OPT *check_opt)
{
int error;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
- if (!file || !&param)
+ if (!file || !param)
return HA_ADMIN_INTERNAL_ERROR;
- maria_chk_init(&param);
- param.thd= thd;
- param.op_name= "optimize";
- param.testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE |
+ maria_chk_init(param);
+ param->thd= thd;
+ param->op_name= "optimize";
+ param->testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE |
T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX);
- param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
+ param->sort_buffer_length= THDVAR(thd, sort_buffer_size);
thd_progress_init(thd, 1);
- if ((error= repair(thd, &param, 1)) && param.retry_repair)
+ if ((error= repair(thd, param, 1)) && param->retry_repair)
{
sql_print_warning("Warning: Optimize table got errno %d on %s.%s, retrying",
- my_errno, param.db_name, param.table_name);
- param.testflag &= ~T_REP_BY_SORT;
- error= repair(thd, &param, 0);
+ my_errno, param->db_name, param->table_name);
+ param->testflag &= ~T_REP_BY_SORT;
+ error= repair(thd, param, 0);
}
thd_progress_end(thd);
return error;
@@ -1800,17 +1800,17 @@ int ha_maria::assign_to_keycache(THD * thd, HA_CHECK_OPT *check_opt)
if (error != HA_ADMIN_OK)
{
/* Send error to user */
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
- if (!&param)
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
+ if (!param)
return HA_ADMIN_INTERNAL_ERROR;
- maria_chk_init(&param);
- param.thd= thd;
- param.op_name= "assign_to_keycache";
- param.db_name= table->s->db.str;
- param.table_name= table->s->table_name.str;
- param.testflag= 0;
- _ma_check_print_error(&param, errmsg);
+ maria_chk_init(param);
+ param->thd= thd;
+ param->op_name= "assign_to_keycache";
+ param->db_name= table->s->db.str;
+ param->table_name= table->s->table_name.str;
+ param->testflag= 0;
+ _ma_check_print_error(param, errmsg);
}
DBUG_RETURN(error);
#else
@@ -1864,17 +1864,17 @@ int ha_maria::preload_keys(THD * thd, HA_CHECK_OPT *check_opt)
errmsg= buf;
}
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
- if (!&param)
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
+ if (!param)
return HA_ADMIN_INTERNAL_ERROR;
- maria_chk_init(&param);
- param.thd= thd;
- param.op_name= "preload_keys";
- param.db_name= table->s->db.str;
- param.table_name= table->s->table_name.str;
- param.testflag= 0;
- _ma_check_print_error(&param, "%s", errmsg);
+ maria_chk_init(param);
+ param->thd= thd;
+ param->op_name= "preload_keys";
+ param->db_name= table->s->db.str;
+ param->table_name= table->s->table_name.str;
+ param->testflag= 0;
+ _ma_check_print_error(param, "%s", errmsg);
DBUG_RETURN(HA_ADMIN_FAILED);
}
DBUG_RETURN(HA_ADMIN_OK);
@@ -1975,25 +1975,25 @@ int ha_maria::enable_indexes(uint mode)
else if (mode == HA_KEY_SWITCH_NONUNIQ_SAVE)
{
THD *thd= table->in_use;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
- if (!&param)
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
+ if (!param)
return HA_ADMIN_INTERNAL_ERROR;
const char *save_proc_info= thd_proc_info(thd, "Creating index");
- maria_chk_init(&param);
- param.op_name= "recreating_index";
- param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
+ maria_chk_init(param);
+ param->op_name= "recreating_index";
+ param->testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
T_CREATE_MISSING_KEYS | T_SAFE_REPAIR);
/*
Don't lock and unlock table if it's locked.
Normally table should be locked. This test is mostly for safety.
*/
if (likely(file->lock_type != F_UNLCK))
- param.testflag|= T_NO_LOCKS;
+ param->testflag|= T_NO_LOCKS;
if (file->create_unique_index_by_sort)
- param.testflag|= T_CREATE_UNIQUE_BY_SORT;
+ param->testflag|= T_CREATE_UNIQUE_BY_SORT;
if (bulk_insert_single_undo == BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR)
{
@@ -2002,23 +2002,23 @@ int ha_maria::enable_indexes(uint mode)
Don't bump create_rename_lsn, because UNDO_BULK_INSERT
should not be skipped in case of crash during repair.
*/
- param.testflag|= T_NO_CREATE_RENAME_LSN;
+ param->testflag|= T_NO_CREATE_RENAME_LSN;
}
- param.myf_rw &= ~MY_WAIT_IF_FULL;
- param.sort_buffer_length= THDVAR(thd,sort_buffer_size);
- param.stats_method= (enum_handler_stats_method)THDVAR(thd,stats_method);
- param.tmpdir= &mysql_tmpdir_list;
- if ((error= (repair(thd, &param, 0) != HA_ADMIN_OK)) && param.retry_repair)
+ param->myf_rw &= ~MY_WAIT_IF_FULL;
+ param->sort_buffer_length= THDVAR(thd,sort_buffer_size);
+ param->stats_method= (enum_handler_stats_method)THDVAR(thd,stats_method);
+ param->tmpdir= &mysql_tmpdir_list;
+ if ((error= (repair(thd, param, 0) != HA_ADMIN_OK)) && param->retry_repair)
{
sql_print_warning("Warning: Enabling keys got errno %d on %s.%s, "
"retrying",
- my_errno, param.db_name, param.table_name);
+ my_errno, param->db_name, param->table_name);
/* This should never fail normally */
DBUG_ASSERT(thd->killed != 0);
/* Repairing by sort failed. Now try standard repair method. */
- param.testflag &= ~T_REP_BY_SORT;
- error= (repair(thd, &param, 0) != HA_ADMIN_OK);
+ param->testflag &= ~T_REP_BY_SORT;
+ error= (repair(thd, param, 0) != HA_ADMIN_OK);
/*
If the standard repair succeeded, clear all error messages which
might have been set by the first repair. They can still be seen
@@ -3597,10 +3597,6 @@ static int ha_maria_init(void *p)
maria_pagecache->extra_debug= 1;
maria_assert_if_crashed_table= debug_assert_if_crashed_table;
-#if defined(HAVE_REALPATH) && !defined(HAVE_valgrind) && !defined(HAVE_BROKEN_REALPATH)
- /* We can only test for sub paths if my_symlink.c is using realpath */
- maria_test_invalid_symlink= test_if_data_home_dir;
-#endif
if (res)
maria_hton= 0;
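The HA_CHECK changes in ha_maria.cc (and the matching ones in ha_myisam.cc further down) are more than a style cleanup: binding a C++ reference through the result of thd->alloc() dereferences the pointer before it has been checked, which is undefined behaviour on allocation failure, and a test such as !&param may legally be optimized away because a reference is assumed non-null. A reduced sketch of the broken and fixed shapes (Param and my_alloc are placeholder names, not server code):

#include <cstdlib>

struct Param { int testflag; };

static Param* my_alloc() {                 // may return NULL, like thd->alloc()
    return static_cast<Param*>(std::malloc(sizeof(Param)));
}

int check_old() {                          // old shape: undefined behaviour
    Param& param = *my_alloc();            // dereferences before any check
    if (!&param) return -1;                // compilers may drop this test
    param.testflag = 0;
    std::free(&param);
    return 0;
}

int check_new() {                          // new shape: test the pointer first
    Param* param = my_alloc();
    if (!param) return -1;
    param->testflag = 0;
    std::free(param);
    return 0;
}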
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index 8424817bc6b..4461dd40ae3 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -2837,7 +2837,7 @@ int maria_repair(HA_CHECK *param, register MARIA_HA *info,
(param->testflag & T_BACKUP_DATA ?
MYF(MY_REDEL_MAKE_BACKUP): MYF(0)) |
sync_dir) ||
- _ma_open_datafile(info, share, NullS, -1))
+ _ma_open_datafile(info, share))
{
goto err;
}
@@ -3998,7 +3998,7 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info,
(param->testflag & T_BACKUP_DATA ?
MYF(MY_REDEL_MAKE_BACKUP): MYF(0)) |
sync_dir) ||
- _ma_open_datafile(info, share, NullS, -1))
+ _ma_open_datafile(info, share))
{
_ma_check_print_error(param, "Couldn't change to new data file");
goto err;
@@ -4638,7 +4638,7 @@ err:
MYF((param->testflag & T_BACKUP_DATA ?
MY_REDEL_MAKE_BACKUP : 0) |
sync_dir)) ||
- _ma_open_datafile(info,share, NullS, -1))
+ _ma_open_datafile(info,share))
got_error=1;
}
}
diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c
index 0680b5d568e..5f7ac333d5d 100644
--- a/storage/maria/ma_create.c
+++ b/storage/maria/ma_create.c
@@ -54,7 +54,8 @@ int maria_create(const char *name, enum data_file_type datafile_type,
uint max_field_lengths, extra_header_size, column_nr;
uint internal_table= flags & HA_CREATE_INTERNAL_TABLE;
ulong reclength, real_reclength,min_pack_length;
- char filename[FN_REFLEN], linkname[FN_REFLEN], *linkname_ptr;
+ char kfilename[FN_REFLEN], klinkname[FN_REFLEN], *klinkname_ptr;
+ char dfilename[FN_REFLEN], dlinkname[FN_REFLEN], *dlinkname_ptr;
ulong pack_reclength;
ulonglong tot_length,max_rows, tmp;
enum en_fieldtype type;
@@ -846,19 +847,19 @@ int maria_create(const char *name, enum data_file_type datafile_type,
/* chop off the table name, temporary tables use generated name */
if ((path= strrchr(ci->index_file_name, FN_LIBCHAR)))
*path= '\0';
- fn_format(filename, name, ci->index_file_name, MARIA_NAME_IEXT,
+ fn_format(kfilename, name, ci->index_file_name, MARIA_NAME_IEXT,
MY_REPLACE_DIR | MY_UNPACK_FILENAME |
MY_RETURN_REAL_PATH | MY_APPEND_EXT);
}
else
{
- fn_format(filename, ci->index_file_name, "", MARIA_NAME_IEXT,
+ fn_format(kfilename, ci->index_file_name, "", MARIA_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
}
- fn_format(linkname, name, "", MARIA_NAME_IEXT,
+ fn_format(klinkname, name, "", MARIA_NAME_IEXT,
MY_UNPACK_FILENAME|MY_APPEND_EXT);
- linkname_ptr= linkname;
+ klinkname_ptr= klinkname;
/*
Don't create the table if the link or file exists to ensure that one
doesn't accidentally destroy another table.
@@ -872,10 +873,10 @@ int maria_create(const char *name, enum data_file_type datafile_type,
{
char *iext= strrchr(name, '.');
int have_iext= iext && !strcmp(iext, MARIA_NAME_IEXT);
- fn_format(filename, name, "", MARIA_NAME_IEXT,
+ fn_format(kfilename, name, "", MARIA_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
- linkname_ptr= NullS;
+ klinkname_ptr= NullS;
/*
Replace the current file.
Don't sync dir now if the data file has the same path.
@@ -895,7 +896,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
NOTE: The filename is compared against unique_file_name of every
open table. Hence we need a real path here.
*/
- if (!internal_table && _ma_test_if_reopen(filename))
+ if (!internal_table && _ma_test_if_reopen(kfilename))
{
my_printf_error(HA_ERR_TABLE_EXIST, "Aria table '%s' is in use "
"(most likely by a MERGE table). Try FLUSH TABLES.",
@@ -904,8 +905,8 @@ int maria_create(const char *name, enum data_file_type datafile_type,
goto err;
}
- if ((file= mysql_file_create_with_symlink(key_file_kfile, linkname_ptr,
- filename, 0, create_mode,
+ if ((file= mysql_file_create_with_symlink(key_file_kfile, klinkname_ptr,
+ kfilename, 0, create_mode,
MYF(MY_WME|create_flag))) < 0)
goto err;
errpos=1;
@@ -1165,30 +1166,30 @@ int maria_create(const char *name, enum data_file_type datafile_type,
/* chop off the table name, temporary tables use generated name */
if ((path= strrchr(ci->data_file_name, FN_LIBCHAR)))
*path= '\0';
- fn_format(filename, name, ci->data_file_name, MARIA_NAME_DEXT,
+ fn_format(dfilename, name, ci->data_file_name, MARIA_NAME_DEXT,
MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_APPEND_EXT);
}
else
{
- fn_format(filename, ci->data_file_name, "", MARIA_NAME_DEXT,
+ fn_format(dfilename, ci->data_file_name, "", MARIA_NAME_DEXT,
MY_UNPACK_FILENAME |
(have_dext ? MY_REPLACE_EXT : MY_APPEND_EXT));
}
- fn_format(linkname, name, "",MARIA_NAME_DEXT,
+ fn_format(dlinkname, name, "",MARIA_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
- linkname_ptr= linkname;
+ dlinkname_ptr= dlinkname;
create_flag=0;
}
else
{
- fn_format(filename,name,"", MARIA_NAME_DEXT,
+ fn_format(dfilename,name,"", MARIA_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
- linkname_ptr= NullS;
+ dlinkname_ptr= NullS;
create_flag= (flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD;
}
if ((dfile=
- mysql_file_create_with_symlink(key_file_dfile, linkname_ptr,
- filename, 0, create_mode,
+ mysql_file_create_with_symlink(key_file_dfile, dlinkname_ptr,
+ dfilename, 0, create_mode,
MYF(MY_WME | create_flag | sync_dir))) < 0)
goto err;
errpos=3;
@@ -1239,19 +1240,21 @@ err_no_lock:
mysql_file_close(dfile, MYF(0));
/* fall through */
case 2:
- if (! (flags & HA_DONT_TOUCH_DATA))
- mysql_file_delete_with_symlink(key_file_dfile,
- fn_format(filename,name,"",MARIA_NAME_DEXT,
- MY_UNPACK_FILENAME | MY_APPEND_EXT),
- sync_dir);
+ if (! (flags & HA_DONT_TOUCH_DATA))
+ {
+ mysql_file_delete(key_file_dfile, dfilename, MYF(sync_dir));
+ if (dlinkname_ptr)
+ mysql_file_delete(key_file_dfile, dlinkname_ptr, MYF(sync_dir));
+ }
/* fall through */
case 1:
mysql_file_close(file, MYF(0));
if (! (flags & HA_DONT_TOUCH_DATA))
- mysql_file_delete_with_symlink(key_file_kfile,
- fn_format(filename,name,"",MARIA_NAME_IEXT,
- MY_UNPACK_FILENAME | MY_APPEND_EXT),
- sync_dir);
+ {
+ mysql_file_delete(key_file_kfile, kfilename, MYF(sync_dir));
+ if (klinkname_ptr)
+ mysql_file_delete(key_file_kfile, klinkname_ptr, MYF(sync_dir));
+ }
}
ma_crypt_free(&share);
my_free(log_data);
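The error path of maria_create() now removes exactly the files it created, reusing the kfilename/dfilename and *linkname_ptr values remembered from the create calls instead of re-deriving the names with fn_format(). Reduced to plain POSIX calls, the cleanup shape is roughly this (cleanup_created_files is an illustrative helper):

#include <unistd.h>

// Remove the real file and, if a symlink was created for it, the link too.
// Both names are the ones actually passed to the create calls.
static void cleanup_created_files(const char* filename, const char* linkname)
{
    unlink(filename);
    if (linkname)
        unlink(linkname);
}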
diff --git a/storage/maria/ma_delete_table.c b/storage/maria/ma_delete_table.c
index ec68902485b..970ac792623 100644
--- a/storage/maria/ma_delete_table.c
+++ b/storage/maria/ma_delete_table.c
@@ -84,25 +84,15 @@ int maria_delete_table(const char *name)
int maria_delete_table_files(const char *name, my_bool temporary, myf sync_dir)
{
- char from[FN_REFLEN];
DBUG_ENTER("maria_delete_table_files");
- fn_format(from,name,"",MARIA_NAME_IEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT);
- if (mysql_file_delete_with_symlink(key_file_kfile, from,
- MYF(MY_WME | sync_dir)))
- DBUG_RETURN(my_errno);
- fn_format(from,name,"",MARIA_NAME_DEXT,MY_UNPACK_FILENAME|MY_APPEND_EXT);
- if (mysql_file_delete_with_symlink(key_file_dfile, from,
- MYF(MY_WME | sync_dir)))
+ if (my_handler_delete_with_symlink(key_file_kfile, name, MARIA_NAME_IEXT, MYF(MY_WME | sync_dir)) ||
+ my_handler_delete_with_symlink(key_file_dfile, name, MARIA_NAME_DEXT, MYF(MY_WME | sync_dir)))
DBUG_RETURN(my_errno);
- // optional files from maria_pack:
- if (!temporary)
- {
- fn_format(from,name,"",".TMD",MY_UNPACK_FILENAME|MY_APPEND_EXT);
- mysql_file_delete_with_symlink(key_file_dfile, from, MYF(0));
- fn_format(from,name,"",".OLD",MY_UNPACK_FILENAME|MY_APPEND_EXT);
- mysql_file_delete_with_symlink(key_file_dfile, from, MYF(0));
+ if (!temporary) {
+ my_handler_delete_with_symlink(key_file_dfile, name, ".TMD", MYF(0));
+ my_handler_delete_with_symlink(key_file_dfile, name, ".OLD", MYF(0));
}
DBUG_RETURN(0);
}
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 4e97c6b43b9..0cf4140941a 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -87,7 +87,7 @@ MARIA_HA *_ma_test_if_reopen(const char *filename)
*/
-static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name,
+static MARIA_HA *maria_clone_internal(MARIA_SHARE *share,
int mode, File data_file,
uint internal_table)
{
@@ -107,7 +107,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name,
}
if (data_file >= 0)
info.dfile.file= data_file;
- else if (_ma_open_datafile(&info, share, name, -1))
+ else if (_ma_open_datafile(&info, share))
goto err;
errpos= 5;
@@ -253,7 +253,7 @@ MARIA_HA *maria_clone(MARIA_SHARE *share, int mode)
{
MARIA_HA *new_info;
mysql_mutex_lock(&THR_LOCK_maria);
- new_info= maria_clone_internal(share, NullS, mode,
+ new_info= maria_clone_internal(share, mode,
share->data_file_type == BLOCK_RECORD ?
share->bitmap.file.file : -1, 0);
mysql_mutex_unlock(&THR_LOCK_maria);
@@ -299,8 +299,13 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
realpath_err= my_realpath(name_buff, fn_format(org_name, name, "",
MARIA_NAME_IEXT,
MY_UNPACK_FILENAME),MYF(0));
+ if (realpath_err > 0) /* File not found, no point in looking further. */
+ {
+ DBUG_RETURN(NULL);
+ }
+
if (my_is_symlink(org_name) &&
- (realpath_err || (*maria_test_invalid_symlink)(name_buff)))
+ (realpath_err || mysys_test_invalid_symlink(name_buff)))
{
my_errno= HA_WRONG_CREATE_OPTION;
DBUG_RETURN(0);
@@ -325,13 +330,16 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
my_errno= HA_ERR_CRASHED;
goto err;
});
+ DEBUG_SYNC_C("mi_open_kfile");
if ((kfile=mysql_file_open(key_file_kfile, name_buff,
- (open_mode=O_RDWR) | O_SHARE,MYF(0))) < 0)
+ (open_mode=O_RDWR) | O_SHARE | O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS))) < 0)
{
if ((errno != EROFS && errno != EACCES) ||
mode != O_RDONLY ||
(kfile=mysql_file_open(key_file_kfile, name_buff,
- (open_mode=O_RDONLY) | O_SHARE,MYF(0))) < 0)
+ (open_mode=O_RDONLY) | O_SHARE | O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS))) < 0)
goto err;
}
share->mode=open_mode;
@@ -376,7 +384,18 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
(void) strmov(index_name, org_name);
*strrchr(org_name, FN_EXTCHAR)= '\0';
(void) fn_format(data_name,org_name,"",MARIA_NAME_DEXT,
- MY_APPEND_EXT|MY_UNPACK_FILENAME|MY_RESOLVE_SYMLINKS);
+ MY_APPEND_EXT|MY_UNPACK_FILENAME);
+ if (my_is_symlink(data_name))
+ {
+ if (my_realpath(data_name, data_name, MYF(0)))
+ goto err;
+ if (mysys_test_invalid_symlink(data_name))
+ {
+ my_errno= HA_WRONG_CREATE_OPTION;
+ goto err;
+ }
+ share->mode|= O_NOFOLLOW; /* all symlinks are resolved by realpath() */
+ }
info_length=mi_uint2korr(share->state.header.header_length);
base_pos= mi_uint2korr(share->state.header.base_pos);
@@ -853,7 +872,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
if ((share->data_file_type == BLOCK_RECORD ||
share->data_file_type == COMPRESSED_RECORD))
{
- if (_ma_open_datafile(&info, share, name, -1))
+ if (_ma_open_datafile(&info, share))
goto err;
data_file= info.dfile.file;
}
@@ -1025,7 +1044,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
data_file= share->bitmap.file.file; /* Only opened once */
}
- if (!(m_info= maria_clone_internal(share, name, mode, data_file,
+ if (!(m_info= maria_clone_internal(share, mode, data_file,
internal_table)))
goto err;
@@ -1913,35 +1932,15 @@ void _ma_set_index_pagecache_callbacks(PAGECACHE_FILE *file,
Open data file
We can't use dup() here as the data file descriptors need to have different
active seek-positions.
-
- The argument file_to_dup is here for the future if there would on some OS
- exist a dup()-like call that would give us two different file descriptors.
*************************************************************************/
-int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share, const char *org_name,
- File file_to_dup __attribute__((unused)))
+int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share)
{
- char *data_name= share->data_file_name.str;
- char real_data_name[FN_REFLEN];
-
- if (org_name)
- {
- fn_format(real_data_name, org_name, "", MARIA_NAME_DEXT, 4);
- if (my_is_symlink(real_data_name))
- {
- if (my_realpath(real_data_name, real_data_name, MYF(0)) ||
- (*maria_test_invalid_symlink)(real_data_name))
- {
- my_errno= HA_WRONG_CREATE_OPTION;
- return 1;
- }
- data_name= real_data_name;
- }
- }
-
+ myf flags= MY_WME | (share->mode & O_NOFOLLOW ? MY_NOSYMLINKS : 0);
+ DEBUG_SYNC_C("mi_open_datafile");
info->dfile.file= share->bitmap.file.file=
- mysql_file_open(key_file_dfile, data_name,
- share->mode | O_SHARE, MYF(MY_WME));
+ mysql_file_open(key_file_dfile, share->data_file_name.str,
+ share->mode | O_SHARE, MYF(flags));
return info->dfile.file >= 0 ? 0 : 1;
}
@@ -1955,8 +1954,8 @@ int _ma_open_keyfile(MARIA_SHARE *share)
mysql_mutex_lock(&share->intern_lock);
share->kfile.file= mysql_file_open(key_file_kfile,
share->unique_file_name.str,
- share->mode | O_SHARE,
- MYF(MY_WME));
+ share->mode | O_SHARE | O_NOFOLLOW,
+ MYF(MY_WME | MY_NOSYMLINKS));
mysql_mutex_unlock(&share->intern_lock);
return (share->kfile.file < 0);
}
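maria_open(), _ma_open_datafile() and _ma_open_keyfile() now open table files with O_NOFOLLOW (and the mysys-level MY_NOSYMLINKS flag), so a symlink is honoured only after it has been resolved with my_realpath() and validated. At the POSIX level the refusal works as in this sketch: open() fails with ELOOP when the final path component is a symbolic link (open_no_symlink is an illustrative wrapper):

#include <cerrno>
#include <cstdio>
#include <fcntl.h>

// Open for read/write but refuse to traverse a symlink in the last
// path component; the caller can then decide to resolve and re-check it.
static int open_no_symlink(const char* path)
{
    int fd = open(path, O_RDWR | O_NOFOLLOW);
    if (fd < 0 && errno == ELOOP)
        std::fprintf(stderr, "%s is a symlink, refusing to open it\n", path);
    return fd;
}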
diff --git a/storage/maria/ma_static.c b/storage/maria/ma_static.c
index 2877f05c8dc..d7caf9edc88 100644
--- a/storage/maria/ma_static.c
+++ b/storage/maria/ma_static.c
@@ -107,12 +107,6 @@ uint32 maria_readnext_vec[]=
SEARCH_BIGGER, SEARCH_SMALLER, SEARCH_SMALLER
};
-static int always_valid(const char *filename __attribute__((unused)))
-{
- return 0;
-}
-
-int (*maria_test_invalid_symlink)(const char *filename)= always_valid;
my_bool (*ma_killed)(MARIA_HA *)= ma_killed_standalone;
#ifdef HAVE_PSI_INTERFACE
diff --git a/storage/maria/maria_chk.c b/storage/maria/maria_chk.c
index 0c1c56dfa94..66bf0f5eeb2 100644
--- a/storage/maria/maria_chk.c
+++ b/storage/maria/maria_chk.c
@@ -1275,7 +1275,7 @@ static int maria_chk(HA_CHECK *param, char *filename)
mysql_file_close(info->dfile.file, MYF(MY_WME)); /* Close new file */
error|=maria_change_to_newfile(filename,MARIA_NAME_DEXT,DATA_TMP_EXT,
0, MYF(0));
- if (_ma_open_datafile(info,info->s, NullS, -1))
+ if (_ma_open_datafile(info, info->s))
error=1;
param->out_flag&= ~O_NEW_DATA; /* We are using new datafile */
param->read_cache.file= info->dfile.file;
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index d216384a2aa..154d60a6164 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -1346,8 +1346,7 @@ int _ma_def_scan_restore_pos(MARIA_HA *info, MARIA_RECORD_POS lastpos);
extern MARIA_HA *_ma_test_if_reopen(const char *filename);
my_bool _ma_check_table_is_closed(const char *name, const char *where);
-int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share, const char *org_name,
- File file_to_dup);
+int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share);
int _ma_open_keyfile(MARIA_SHARE *share);
void _ma_setup_functions(register MARIA_SHARE *share);
my_bool _ma_dynmap_file(MARIA_HA *info, my_off_t size);
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 60764bfd696..e363fb1da2b 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2009, 2014, SkySQL Ab.
+ Copyright (c) 2009, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -870,59 +870,59 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
{
if (!file) return HA_ADMIN_INTERNAL_ERROR;
int error;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
MYISAM_SHARE* share = file->s;
const char *old_proc_info=thd->proc_info;
- if (!&param)
+ if (!param)
return HA_ADMIN_INTERNAL_ERROR;
thd_proc_info(thd, "Checking table");
- myisamchk_init(&param);
- param.thd = thd;
- param.op_name = "check";
- param.db_name= table->s->db.str;
- param.table_name= table->alias.c_ptr();
- param.testflag = check_opt->flags | T_CHECK | T_SILENT;
- param.stats_method= (enum_handler_stats_method)THDVAR(thd, stats_method);
+ myisamchk_init(param);
+ param->thd = thd;
+ param->op_name = "check";
+ param->db_name= table->s->db.str;
+ param->table_name= table->alias.c_ptr();
+ param->testflag = check_opt->flags | T_CHECK | T_SILENT;
+ param->stats_method= (enum_handler_stats_method)THDVAR(thd, stats_method);
if (!(table->db_stat & HA_READ_ONLY))
- param.testflag|= T_STATISTICS;
- param.using_global_keycache = 1;
+ param->testflag|= T_STATISTICS;
+ param->using_global_keycache = 1;
if (!mi_is_crashed(file) &&
- (((param.testflag & T_CHECK_ONLY_CHANGED) &&
+ (((param->testflag & T_CHECK_ONLY_CHANGED) &&
!(share->state.changed & (STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR)) &&
share->state.open_count == 0) ||
- ((param.testflag & T_FAST) && (share->state.open_count ==
+ ((param->testflag & T_FAST) && (share->state.open_count ==
(uint) (share->global_changed ? 1 : 0)))))
return HA_ADMIN_ALREADY_DONE;
- error = chk_status(&param, file); // Not fatal
- error = chk_size(&param, file);
+ error = chk_status(param, file); // Not fatal
+ error = chk_size(param, file);
if (!error)
- error |= chk_del(&param, file, param.testflag);
+ error |= chk_del(param, file, param->testflag);
if (!error)
- error = chk_key(&param, file);
+ error = chk_key(param, file);
if (!error)
{
- if ((!(param.testflag & T_QUICK) &&
+ if ((!(param->testflag & T_QUICK) &&
((share->options &
(HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) ||
- (param.testflag & (T_EXTEND | T_MEDIUM)))) ||
+ (param->testflag & (T_EXTEND | T_MEDIUM)))) ||
mi_is_crashed(file))
{
- ulonglong old_testflag= param.testflag;
- param.testflag|=T_MEDIUM;
- if (!(error= init_io_cache(&param.read_cache, file->dfile,
+ ulonglong old_testflag= param->testflag;
+ param->testflag|=T_MEDIUM;
+ if (!(error= init_io_cache(&param->read_cache, file->dfile,
my_default_record_cache_size, READ_CACHE,
share->pack.header_length, 1, MYF(MY_WME))))
{
- error= chk_data_link(&param, file, MY_TEST(param.testflag & T_EXTEND));
- end_io_cache(&(param.read_cache));
+ error= chk_data_link(param, file, MY_TEST(param->testflag & T_EXTEND));
+ end_io_cache(&param->read_cache);
}
- param.testflag= old_testflag;
+ param->testflag= old_testflag;
}
}
if (!error)
@@ -930,7 +930,7 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
if ((share->state.changed & (STATE_CHANGED |
STATE_CRASHED_ON_REPAIR |
STATE_CRASHED | STATE_NOT_ANALYZED)) ||
- (param.testflag & T_STATISTICS) ||
+ (param->testflag & T_STATISTICS) ||
mi_is_crashed(file))
{
file->update|=HA_STATE_CHANGED | HA_STATE_ROW_CHANGED;
@@ -938,7 +938,7 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
share->state.changed&= ~(STATE_CHANGED | STATE_CRASHED |
STATE_CRASHED_ON_REPAIR);
if (!(table->db_stat & HA_READ_ONLY))
- error=update_state_info(&param,file,UPDATE_TIME | UPDATE_OPEN_COUNT |
+ error=update_state_info(param,file,UPDATE_TIME | UPDATE_OPEN_COUNT |
UPDATE_STAT);
mysql_mutex_unlock(&share->intern_lock);
info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
@@ -965,30 +965,30 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt)
{
int error=0;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
MYISAM_SHARE* share = file->s;
- if (!&param)
+ if (!param)
return HA_ADMIN_INTERNAL_ERROR;
- myisamchk_init(&param);
- param.thd = thd;
- param.op_name= "analyze";
- param.db_name= table->s->db.str;
- param.table_name= table->alias.c_ptr();
- param.testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
+ myisamchk_init(param);
+ param->thd = thd;
+ param->op_name= "analyze";
+ param->db_name= table->s->db.str;
+ param->table_name= table->alias.c_ptr();
+ param->testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS |
T_DONT_CHECK_CHECKSUM);
- param.using_global_keycache = 1;
- param.stats_method= (enum_handler_stats_method)THDVAR(thd, stats_method);
+ param->using_global_keycache = 1;
+ param->stats_method= (enum_handler_stats_method)THDVAR(thd, stats_method);
if (!(share->state.changed & STATE_NOT_ANALYZED))
return HA_ADMIN_ALREADY_DONE;
- error = chk_key(&param, file);
+ error = chk_key(param, file);
if (!error)
{
mysql_mutex_lock(&share->intern_lock);
- error=update_state_info(&param,file,UPDATE_STAT);
+ error=update_state_info(param,file,UPDATE_STAT);
mysql_mutex_unlock(&share->intern_lock);
}
else if (!mi_is_crashed(file) && !thd->killed)
@@ -1000,38 +1000,38 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt)
int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt)
{
int error;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
ha_rows start_records;
- if (!file || !&param) return HA_ADMIN_INTERNAL_ERROR;
+ if (!file || !param) return HA_ADMIN_INTERNAL_ERROR;
- myisamchk_init(&param);
- param.thd = thd;
- param.op_name= "repair";
- param.testflag= ((check_opt->flags & ~(T_EXTEND)) |
+ myisamchk_init(param);
+ param->thd = thd;
+ param->op_name= "repair";
+ param->testflag= ((check_opt->flags & ~(T_EXTEND)) |
T_SILENT | T_FORCE_CREATE | T_CALC_CHECKSUM |
(check_opt->flags & T_EXTEND ? T_REP : T_REP_BY_SORT));
- param.tmpfile_createflag= O_RDWR | O_TRUNC;
- param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
- param.backup_time= check_opt->start_time;
+ param->tmpfile_createflag= O_RDWR | O_TRUNC;
+ param->sort_buffer_length= THDVAR(thd, sort_buffer_size);
+ param->backup_time= check_opt->start_time;
start_records=file->state->records;
- while ((error=repair(thd,param,0)) && param.retry_repair)
+ while ((error=repair(thd,*param,0)) && param->retry_repair)
{
- param.retry_repair=0;
- if (test_all_bits(param.testflag,
+ param->retry_repair=0;
+ if (test_all_bits(param->testflag,
(uint) (T_RETRY_WITHOUT_QUICK | T_QUICK)))
{
- param.testflag&= ~(T_RETRY_WITHOUT_QUICK | T_QUICK);
+ param->testflag&= ~(T_RETRY_WITHOUT_QUICK | T_QUICK);
/* Ensure we don't lose any rows when retrying without quick */
- param.testflag|= T_SAFE_REPAIR;
+ param->testflag|= T_SAFE_REPAIR;
sql_print_information("Retrying repair of: '%s' including modifying data file",
table->s->path.str);
continue;
}
- param.testflag&= ~T_QUICK;
- if ((param.testflag & T_REP_BY_SORT))
+ param->testflag&= ~T_QUICK;
+ if ((param->testflag & T_REP_BY_SORT))
{
- param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP;
+ param->testflag= (param->testflag & ~T_REP_BY_SORT) | T_REP;
sql_print_information("Retrying repair of: '%s' with keycache",
table->s->path.str);
continue;
@@ -1053,23 +1053,23 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt)
int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt)
{
int error;
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
- if (!file || !&param) return HA_ADMIN_INTERNAL_ERROR;
+ if (!file || !param) return HA_ADMIN_INTERNAL_ERROR;
- myisamchk_init(&param);
- param.thd = thd;
- param.op_name= "optimize";
- param.testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE |
+ myisamchk_init(param);
+ param->thd = thd;
+ param->op_name= "optimize";
+ param->testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE |
T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX);
- param.tmpfile_createflag= O_RDWR | O_TRUNC;
- param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
- if ((error= repair(thd,param,1)) && param.retry_repair)
+ param->tmpfile_createflag= O_RDWR | O_TRUNC;
+ param->sort_buffer_length= THDVAR(thd, sort_buffer_size);
+ if ((error= repair(thd,*param,1)) && param->retry_repair)
{
sql_print_warning("Warning: Optimize table got errno %d on %s.%s, retrying",
- my_errno, param.db_name, param.table_name);
- param.testflag&= ~T_REP_BY_SORT;
- error= repair(thd,param,1);
+ my_errno, param->db_name, param->table_name);
+ param->testflag&= ~T_REP_BY_SORT;
+ error= repair(thd,*param,1);
}
return error;
}
@@ -1274,17 +1274,17 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt)
if (error != HA_ADMIN_OK)
{
/* Send error to user */
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
- if (!&param)
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
+ if (!param)
return HA_ADMIN_INTERNAL_ERROR;
- myisamchk_init(&param);
- param.thd= thd;
- param.op_name= "assign_to_keycache";
- param.db_name= table->s->db.str;
- param.table_name= table->s->table_name.str;
- param.testflag= 0;
- mi_check_print_error(&param, errmsg);
+ myisamchk_init(param);
+ param->thd= thd;
+ param->op_name= "assign_to_keycache";
+ param->db_name= table->s->db.str;
+ param->table_name= table->s->table_name.str;
+ param->testflag= 0;
+ mi_check_print_error(param, errmsg);
}
DBUG_RETURN(error);
}
@@ -1341,16 +1341,16 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt)
err:
{
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
- if (!&param)
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
+ if (!param)
return HA_ADMIN_INTERNAL_ERROR;
- myisamchk_init(&param);
- param.thd= thd;
- param.op_name= "preload_keys";
- param.db_name= table->s->db.str;
- param.table_name= table->s->table_name.str;
- param.testflag= 0;
- mi_check_print_error(&param, errmsg);
+ myisamchk_init(param);
+ param->thd= thd;
+ param->op_name= "preload_keys";
+ param->db_name= table->s->db.str;
+ param->table_name= table->s->table_name.str;
+ param->testflag= 0;
+ mi_check_print_error(param, errmsg);
DBUG_RETURN(error);
}
}
@@ -1455,45 +1455,45 @@ int ha_myisam::enable_indexes(uint mode)
{
THD *thd= table->in_use;
int was_error= thd->is_error();
- HA_CHECK &param= *(HA_CHECK*) thd->alloc(sizeof(param));
+ HA_CHECK *param= (HA_CHECK*) thd->alloc(sizeof *param);
const char *save_proc_info=thd->proc_info;
- if (!&param)
+ if (!param)
DBUG_RETURN(HA_ADMIN_INTERNAL_ERROR);
thd_proc_info(thd, "Creating index");
- myisamchk_init(&param);
- param.op_name= "recreating_index";
- param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
+ myisamchk_init(param);
+ param->op_name= "recreating_index";
+ param->testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
T_CREATE_MISSING_KEYS);
/*
Don't lock and unlock table if it's locked.
Normally table should be locked. This test is mostly for safety.
*/
if (likely(file->lock_type != F_UNLCK))
- param.testflag|= T_NO_LOCKS;
-
+ param->testflag|= T_NO_LOCKS;
+
if (file->create_unique_index_by_sort)
- param.testflag|= T_CREATE_UNIQUE_BY_SORT;
+ param->testflag|= T_CREATE_UNIQUE_BY_SORT;
- param.myf_rw&= ~MY_WAIT_IF_FULL;
- param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
- param.stats_method= (enum_handler_stats_method)THDVAR(thd, stats_method);
- param.tmpdir=&mysql_tmpdir_list;
- if ((error= (repair(thd,param,0) != HA_ADMIN_OK)) && param.retry_repair)
+ param->myf_rw&= ~MY_WAIT_IF_FULL;
+ param->sort_buffer_length= THDVAR(thd, sort_buffer_size);
+ param->stats_method= (enum_handler_stats_method)THDVAR(thd, stats_method);
+ param->tmpdir=&mysql_tmpdir_list;
+ if ((error= (repair(thd,*param,0) != HA_ADMIN_OK)) && param->retry_repair)
{
sql_print_warning("Warning: Enabling keys got errno %d on %s.%s, retrying",
- my_errno, param.db_name, param.table_name);
+ my_errno, param->db_name, param->table_name);
/*
Repairing by sort failed. Now try standard repair method.
Still we want to fix only index file. If data file corruption
was detected (T_RETRY_WITHOUT_QUICK), we shouldn't do much here.
Let implicit repair do this job.
*/
- if (!(param.testflag & T_RETRY_WITHOUT_QUICK))
+ if (!(param->testflag & T_RETRY_WITHOUT_QUICK))
{
- param.testflag&= ~T_REP_BY_SORT;
- error= (repair(thd,param,0) != HA_ADMIN_OK);
+ param->testflag&= ~T_REP_BY_SORT;
+ error= (repair(thd,*param,0) != HA_ADMIN_OK);
}
/*
If the standard repair succeeded, clear all error messages which
@@ -1896,15 +1896,22 @@ int ha_myisam::info(uint flag)
Set data_file_name and index_file_name to point at the symlink value
if table is symlinked (Ie; Real name is not same as generated name)
*/
+ char buf[FN_REFLEN];
data_file_name= index_file_name= 0;
fn_format(name_buff, file->filename, "", MI_NAME_DEXT,
MY_APPEND_EXT | MY_UNPACK_FILENAME);
- if (strcmp(name_buff, misam_info.data_file_name))
- data_file_name=misam_info.data_file_name;
+ if (my_is_symlink(name_buff))
+ {
+ my_readlink(buf, name_buff, MYF(0));
+ data_file_name= ha_thd()->strdup(buf);
+ }
fn_format(name_buff, file->filename, "", MI_NAME_IEXT,
MY_APPEND_EXT | MY_UNPACK_FILENAME);
- if (strcmp(name_buff, misam_info.index_file_name))
- index_file_name=misam_info.index_file_name;
+ if (my_is_symlink(name_buff))
+ {
+ my_readlink(buf, name_buff, MYF(0));
+ index_file_name= ha_thd()->strdup(buf);
+ }
}
if (flag & HA_STATUS_ERRKEY)
{
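
In ha_myisam::info() the old string comparison against misam_info.*_file_name is replaced by an explicit check: if the .MYD or .MYI path is a symlink, the link target is read and reported as data_file_name / index_file_name. A rough POSIX sketch of that pattern, with lstat()/readlink() standing in for the my_is_symlink()/my_readlink() wrappers and an assumed 4 KiB buffer:

#include <unistd.h>
#include <sys/stat.h>
#include <string>

// Return the symlink target if 'path' is a symlink, otherwise an empty string.
std::string symlink_target(const char *path)
{
  struct stat st;
  if (lstat(path, &st) != 0 || !S_ISLNK(st.st_mode))
    return "";                        // not a symlink (or not accessible)
  char buf[4096];
  ssize_t len = readlink(path, buf, sizeof(buf) - 1);
  if (len < 0)
    return "";
  buf[len] = '\0';                    // readlink() does not NUL-terminate
  return std::string(buf);
}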
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index bab0ad2b6a4..3d6e6297d3b 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -75,7 +75,7 @@ static int sort_delete_record(MI_SORT_PARAM *sort_param);
static SORT_KEY_BLOCKS *alloc_key_blocks(HA_CHECK *, uint, uint);
static ha_checksum mi_byte_checksum(const uchar *buf, uint length);
static void set_data_file_type(MI_SORT_INFO *sort_info, MYISAM_SHARE *share);
-static int replace_data_file(HA_CHECK *, MI_INFO *, const char *, File);
+static int replace_data_file(HA_CHECK *param, MI_INFO *info, File new_file);
void myisamchk_init(HA_CHECK *param)
{
@@ -1708,7 +1708,7 @@ err:
/* Replace the actual file with the temporary file */
if (new_file >= 0)
{
- got_error= replace_data_file(param, info, name, new_file);
+ got_error= replace_data_file(param, info, new_file);
new_file= -1;
param->retry_repair= 0;
}
@@ -2522,7 +2522,7 @@ err:
/* Replace the actual file with the temporary file */
if (new_file >= 0)
{
- got_error= replace_data_file(param, info, name, new_file);
+ got_error= replace_data_file(param, info, new_file);
new_file= -1;
}
}
@@ -2536,7 +2536,7 @@ err:
(void) mysql_file_delete(mi_key_file_datatmp,
param->temp_filename, MYF(MY_WME));
if (info->dfile == new_file) /* Retry with key cache */
- if (unlikely(mi_open_datafile(info, share, name, -1)))
+ if (unlikely(mi_open_datafile(info, share)))
param->retry_repair= 0; /* Safety */
}
mi_mark_crashed_on_repair(info);
@@ -3061,7 +3061,7 @@ err:
/* Replace the actual file with the temporary file */
if (new_file >= 0)
{
- got_error= replace_data_file(param, info, name, new_file);
+ got_error= replace_data_file(param, info, new_file);
new_file= -1;
}
}
@@ -3075,7 +3075,7 @@ err:
(void) mysql_file_delete(mi_key_file_datatmp,
param->temp_filename, MYF(MY_WME));
if (info->dfile == new_file) /* Retry with key cache */
- if (unlikely(mi_open_datafile(info, share, name, -1)))
+ if (unlikely(mi_open_datafile(info, share)))
param->retry_repair= 0; /* Safety */
}
mi_mark_crashed_on_repair(info);
@@ -4757,8 +4757,7 @@ int mi_make_backup_of_index(MI_INFO *info, time_t backup_time, myf flags)
}
-static int replace_data_file(HA_CHECK *param, MI_INFO *info,
- const char *name, File new_file)
+static int replace_data_file(HA_CHECK *param, MI_INFO *info, File new_file)
{
MYISAM_SHARE *share=info->s;
@@ -4794,7 +4793,7 @@ static int replace_data_file(HA_CHECK *param, MI_INFO *info,
DATA_TMP_EXT, param->backup_time,
(param->testflag & T_BACKUP_DATA ?
MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
- mi_open_datafile(info, share, name, -1))
+ mi_open_datafile(info, share))
return 1;
return 0;
}
diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c
index c781538dc0f..8c02674fba5 100644
--- a/storage/myisam/mi_create.c
+++ b/storage/myisam/mi_create.c
@@ -46,7 +46,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
uint aligned_key_start, block_length, res;
uint internal_table= flags & HA_CREATE_INTERNAL_TABLE;
ulong reclength, real_reclength,min_pack_length;
- char filename[FN_REFLEN],linkname[FN_REFLEN], *linkname_ptr;
+ char kfilename[FN_REFLEN],klinkname[FN_REFLEN], *klinkname_ptr;
+ char dfilename[FN_REFLEN],dlinkname[FN_REFLEN], *dlinkname_ptr;
ulong pack_reclength;
ulonglong tot_length,max_rows, tmp;
enum en_fieldtype type;
@@ -594,19 +595,19 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
/* chop off the table name, temporary tables use generated name */
if ((path= strrchr(ci->index_file_name, FN_LIBCHAR)))
*path= '\0';
- fn_format(filename, name, ci->index_file_name, MI_NAME_IEXT,
+ fn_format(kfilename, name, ci->index_file_name, MI_NAME_IEXT,
MY_REPLACE_DIR | MY_UNPACK_FILENAME |
MY_RETURN_REAL_PATH | MY_APPEND_EXT);
}
else
{
- fn_format(filename, ci->index_file_name, "", MI_NAME_IEXT,
+ fn_format(kfilename, ci->index_file_name, "", MI_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
}
- fn_format(linkname, name, "", MI_NAME_IEXT,
+ fn_format(klinkname, name, "", MI_NAME_IEXT,
MY_UNPACK_FILENAME|MY_APPEND_EXT);
- linkname_ptr=linkname;
+ klinkname_ptr= klinkname;
/*
Don't create the table if the link or file exists to ensure that one
doesn't accidently destroy another table.
@@ -617,10 +618,10 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
{
char *iext= strrchr(name, '.');
int have_iext= iext && !strcmp(iext, MI_NAME_IEXT);
- fn_format(filename, name, "", MI_NAME_IEXT,
+ fn_format(kfilename, name, "", MI_NAME_IEXT,
MY_UNPACK_FILENAME | MY_RETURN_REAL_PATH |
(have_iext ? MY_REPLACE_EXT : MY_APPEND_EXT));
- linkname_ptr=0;
+ klinkname_ptr= 0;
/* Replace the current file */
create_flag=(flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD;
}
@@ -635,7 +636,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
NOTE: The filename is compared against unique_file_name of every
open table. Hence we need a real path here.
*/
- if (!internal_table && test_if_reopen(filename))
+ if (!internal_table && test_if_reopen(kfilename))
{
my_printf_error(HA_ERR_TABLE_EXIST, "MyISAM table '%s' is in use "
"(most likely by a MERGE table). Try FLUSH TABLES.",
@@ -645,7 +646,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
}
if ((file= mysql_file_create_with_symlink(mi_key_file_kfile,
- linkname_ptr, filename, 0,
+ klinkname_ptr, kfilename, 0,
create_mode,
MYF(MY_WME | create_flag))) < 0)
goto err;
@@ -665,31 +666,31 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
/* chop off the table name, temporary tables use generated name */
if ((path= strrchr(ci->data_file_name, FN_LIBCHAR)))
*path= '\0';
- fn_format(filename, name, ci->data_file_name, MI_NAME_DEXT,
+ fn_format(dfilename, name, ci->data_file_name, MI_NAME_DEXT,
MY_REPLACE_DIR | MY_UNPACK_FILENAME | MY_APPEND_EXT);
}
else
{
- fn_format(filename, ci->data_file_name, "", MI_NAME_DEXT,
+ fn_format(dfilename, ci->data_file_name, "", MI_NAME_DEXT,
MY_UNPACK_FILENAME |
(have_dext ? MY_REPLACE_EXT : MY_APPEND_EXT));
}
- fn_format(linkname, name, "",MI_NAME_DEXT,
+ fn_format(dlinkname, name, "",MI_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
- linkname_ptr=linkname;
+ dlinkname_ptr= dlinkname;
create_flag=0;
}
else
{
- fn_format(filename,name,"", MI_NAME_DEXT,
+ fn_format(dfilename,name,"", MI_NAME_DEXT,
MY_UNPACK_FILENAME | MY_APPEND_EXT);
- linkname_ptr=0;
+ dlinkname_ptr= 0;
create_flag=(flags & HA_CREATE_KEEP_FILES) ? 0 : MY_DELETE_OLD;
}
if ((dfile=
mysql_file_create_with_symlink(mi_key_file_dfile,
- linkname_ptr, filename, 0,
+ dlinkname_ptr, dfilename, 0,
create_mode,
MYF(MY_WME | create_flag))) < 0)
goto err;
@@ -842,19 +843,21 @@ err_no_lock:
(void) mysql_file_close(dfile, MYF(0));
/* fall through */
case 2:
- if (! (flags & HA_DONT_TOUCH_DATA))
- mysql_file_delete_with_symlink(mi_key_file_dfile,
- fn_format(filename, name, "", MI_NAME_DEXT,
- MY_UNPACK_FILENAME | MY_APPEND_EXT),
- MYF(0));
+ if (! (flags & HA_DONT_TOUCH_DATA))
+ {
+ mysql_file_delete(mi_key_file_dfile, dfilename, MYF(0));
+ if (dlinkname_ptr)
+ mysql_file_delete(mi_key_file_dfile, dlinkname_ptr, MYF(0));
+ }
/* fall through */
case 1:
(void) mysql_file_close(file, MYF(0));
if (! (flags & HA_DONT_TOUCH_DATA))
- mysql_file_delete_with_symlink(mi_key_file_kfile,
- fn_format(filename, name, "", MI_NAME_IEXT,
- MY_UNPACK_FILENAME | MY_APPEND_EXT),
- MYF(0));
+ {
+ mysql_file_delete(mi_key_file_kfile, kfilename, MYF(0));
+ if (klinkname_ptr)
+ mysql_file_delete(mi_key_file_kfile, klinkname_ptr, MYF(0));
+ }
}
my_free(rec_per_key_part);
DBUG_RETURN(my_errno=save_errno); /* return the fatal errno */
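
mi_create() now keeps separate buffers for the index file (kfilename/klinkname) and the data file (dfilename/dlinkname), so the error path can delete exactly the files and symlinks it created instead of re-deriving the names from the table name with fn_format(), which would miss a DATA DIRECTORY or INDEX DIRECTORY target. A simplified sketch of that cleanup step, with unlink() standing in for mysql_file_delete():

#include <unistd.h>
#include <cstdio>

// Remove the real file and, if one was created, the symlink pointing at it.
void cleanup_created_file(const char *filename, const char *linkname)
{
  if (unlink(filename) != 0)
    perror(filename);                  // best effort, mirrors MYF(0)
  if (linkname && unlink(linkname) != 0)
    perror(linkname);
}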
diff --git a/storage/myisam/mi_delete_table.c b/storage/myisam/mi_delete_table.c
index 7da960011ca..3422e6b045d 100644
--- a/storage/myisam/mi_delete_table.c
+++ b/storage/myisam/mi_delete_table.c
@@ -26,47 +26,22 @@
#define mi_key_file_dfile 0
#endif
-static int delete_one_file(const char *name, const char *ext,
- PSI_file_key pskey __attribute__((unused)),
- myf flags)
-{
- char from[FN_REFLEN];
- DBUG_ENTER("delete_one_file");
- fn_format(from,name, "", ext, MY_UNPACK_FILENAME | MY_APPEND_EXT);
- if (my_is_symlink(from) && (*myisam_test_invalid_symlink)(from))
- {
- /*
- Symlink is pointing to file in data directory.
- Remove symlink, keep file.
- */
- if (mysql_file_delete(pskey, from, flags))
- DBUG_RETURN(my_errno);
- }
- else
- {
- if (mysql_file_delete_with_symlink(pskey, from, flags))
- DBUG_RETURN(my_errno);
- }
- DBUG_RETURN(0);
-}
-
int mi_delete_table(const char *name)
{
- int res;
DBUG_ENTER("mi_delete_table");
#ifdef EXTRA_DEBUG
check_table_is_closed(name,"delete");
#endif
- if ((res= delete_one_file(name, MI_NAME_IEXT, mi_key_file_kfile, MYF(MY_WME))))
- DBUG_RETURN(res);
- if ((res= delete_one_file(name, MI_NAME_DEXT, mi_key_file_dfile, MYF(MY_WME))))
- DBUG_RETURN(res);
+ if (my_handler_delete_with_symlink(mi_key_file_kfile, name, MI_NAME_IEXT, MYF(MY_WME)) ||
+ my_handler_delete_with_symlink(mi_key_file_dfile, name, MI_NAME_DEXT, MYF(MY_WME)))
+ DBUG_RETURN(my_errno);
+
// optionally present:
- delete_one_file(name, ".OLD", mi_key_file_dfile, MYF(0));
- delete_one_file(name, ".TMD", mi_key_file_dfile, MYF(0));
+ my_handler_delete_with_symlink(mi_key_file_dfile, name, ".OLD", MYF(0));
+ my_handler_delete_with_symlink(mi_key_file_dfile, name, ".TMD", MYF(0));
DBUG_RETURN(0);
}
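
mi_delete_table() now defers to my_handler_delete_with_symlink() instead of the local delete_one_file() helper; the intended effect is that deleting a symlinked table removes both the table file and the symlink pointing at it. A rough POSIX approximation of that behaviour (the real mysys routine also validates the symlink target, which is omitted here):

#include <unistd.h>
#include <sys/stat.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>

// Delete "<name><ext>"; if it is a symlink, delete both the link target
// and the link itself. Returns 0 on success, -1 on error.
int delete_with_symlink(const char *name, const char *ext)
{
  char path[PATH_MAX];
  snprintf(path, sizeof(path), "%s%s", name, ext);

  struct stat st;
  if (lstat(path, &st) == 0 && S_ISLNK(st.st_mode))
  {
    char target[PATH_MAX];
    ssize_t len = readlink(path, target, sizeof(target) - 1);
    if (len > 0)
    {
      target[len] = '\0';
      unlink(target);                  // remove the file the link points to
    }
  }
  return unlink(path);                 // remove the link (or the plain file)
}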
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index 2d794f611d7..354fb0a8e04 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -104,8 +104,13 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
realpath_err= my_realpath(name_buff,
fn_format(org_name,name,"",MI_NAME_IEXT,4),MYF(0));
+ if (realpath_err > 0) /* File not found, no point in looking further. */
+ {
+ DBUG_RETURN(NULL);
+ }
+
if (my_is_symlink(org_name) &&
- (realpath_err || (*myisam_test_invalid_symlink)(name_buff)))
+ (realpath_err || mysys_test_invalid_symlink(name_buff)))
{
my_errno= HA_WRONG_CREATE_OPTION;
DBUG_RETURN (NULL);
@@ -131,15 +136,17 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
my_errno= HA_ERR_CRASHED;
goto err;
});
- if ((kfile= mysql_file_open(mi_key_file_kfile,
- name_buff,
- (open_mode= O_RDWR) | O_SHARE, MYF(0))) < 0)
+
+ DEBUG_SYNC_C("mi_open_kfile");
+ if ((kfile= mysql_file_open(mi_key_file_kfile, name_buff,
+ (open_mode= O_RDWR) | O_SHARE | O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS))) < 0)
{
if ((errno != EROFS && errno != EACCES) ||
mode != O_RDONLY ||
- (kfile= mysql_file_open(mi_key_file_kfile,
- name_buff,
- (open_mode= O_RDONLY) | O_SHARE, MYF(0))) < 0)
+ (kfile= mysql_file_open(mi_key_file_kfile, name_buff,
+ (open_mode= O_RDONLY) | O_SHARE| O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS))) < 0)
goto err;
}
share->mode=open_mode;
@@ -183,7 +190,18 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
(void) strmov(index_name, org_name);
*strrchr(org_name, '.')= '\0';
(void) fn_format(data_name,org_name,"",MI_NAME_DEXT,
- MY_APPEND_EXT|MY_UNPACK_FILENAME|MY_RESOLVE_SYMLINKS);
+ MY_APPEND_EXT|MY_UNPACK_FILENAME);
+ if (my_is_symlink(data_name))
+ {
+ if (my_realpath(data_name, data_name, MYF(0)))
+ goto err;
+ if (mysys_test_invalid_symlink(data_name))
+ {
+ my_errno= HA_WRONG_CREATE_OPTION;
+ goto err;
+ }
+ share->mode|= O_NOFOLLOW; /* all symlinks are resolved by realpath() */
+ }
info_length=mi_uint2korr(share->state.header.header_length);
base_pos=mi_uint2korr(share->state.header.base_pos);
@@ -497,7 +515,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
lock_error=1; /* Database unlocked */
}
- if (mi_open_datafile(&info, share, name, -1))
+ if (mi_open_datafile(&info, share))
goto err;
errpos=5;
@@ -578,7 +596,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
my_errno=EACCES; /* Can't open in write mode */
goto err;
}
- if (mi_open_datafile(&info, share, name, old_info->dfile))
+ if (mi_open_datafile(&info, share))
goto err;
errpos=5;
have_rtree= old_info->rtree_recursion_state != NULL;
@@ -1248,33 +1266,14 @@ uchar *mi_recinfo_read(uchar *ptr, MI_COLUMNDEF *recinfo)
Open data file.
We can't use dup() here as the data file descriptors need to have different
active seek-positions.
-
-The argument file_to_dup is here for the future if there would on some OS
-exist a dup()-like call that would give us two different file descriptors.
*************************************************************************/
-int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, const char *org_name,
- File file_to_dup __attribute__((unused)))
+int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share)
{
- char *data_name= share->data_file_name;
- char real_data_name[FN_REFLEN];
-
- if (org_name)
- {
- fn_format(real_data_name,org_name,"",MI_NAME_DEXT,4);
- if (my_is_symlink(real_data_name))
- {
- if (my_realpath(real_data_name, real_data_name, MYF(0)) ||
- (*myisam_test_invalid_symlink)(real_data_name))
- {
- my_errno= HA_WRONG_CREATE_OPTION;
- return 1;
- }
- data_name= real_data_name;
- }
- }
- info->dfile= mysql_file_open(mi_key_file_dfile,
- data_name, share->mode | O_SHARE, MYF(MY_WME));
+ myf flags= MY_WME | (share->mode & O_NOFOLLOW ? MY_NOSYMLINKS: 0);
+ DEBUG_SYNC_C("mi_open_datafile");
+ info->dfile= mysql_file_open(mi_key_file_dfile, share->data_file_name,
+ share->mode | O_SHARE, MYF(flags));
return info->dfile >= 0 ? 0 : 1;
}
@@ -1283,8 +1282,8 @@ int mi_open_keyfile(MYISAM_SHARE *share)
{
if ((share->kfile= mysql_file_open(mi_key_file_kfile,
share->unique_file_name,
- share->mode | O_SHARE,
- MYF(MY_WME))) < 0)
+ share->mode | O_SHARE | O_NOFOLLOW,
+ MYF(MY_NOSYMLINKS | MY_WME))) < 0)
return 1;
return 0;
}
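
The mi_open() changes close a symlink race: the key file is opened with O_NOFOLLOW and MY_NOSYMLINKS so a symlink planted at the .MYI path cannot be followed, and a symlinked data file is resolved once with my_realpath(), validated with mysys_test_invalid_symlink(), and then opened by its resolved name (also refusing symlinks, via the flag kept in share->mode). A condensed POSIX sketch of the same idea; the "target must stay under an allowed directory" rule shown here is only an assumed stand-in for the real validation:

#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <limits.h>
#include <string.h>
#include <stdlib.h>

// Open 'path' without ever following a symlink at the final component.
int open_nofollow(const char *path, int flags)
{
  return open(path, flags | O_NOFOLLOW);   // fails with ELOOP on a symlink
}

// If 'path' is a symlink, resolve it and accept it only when the target
// lives under 'allowed_dir'; return the path to open, or NULL on rejection.
const char *resolve_checked(const char *path, const char *allowed_dir,
                            char *out, size_t out_size)
{
  struct stat st;
  if (lstat(path, &st) != 0 || !S_ISLNK(st.st_mode))
  {
    strncpy(out, path, out_size - 1);
    out[out_size - 1] = '\0';
    return out;                            // plain file: use as-is
  }
  char real[PATH_MAX];
  if (!realpath(path, real))
    return NULL;
  if (strncmp(real, allowed_dir, strlen(allowed_dir)) != 0)
    return NULL;                           // symlink escapes the allowed dir
  strncpy(out, real, out_size - 1);
  out[out_size - 1] = '\0';
  return out;
}

open() with O_NOFOLLOW fails when the final path component is a symlink, which is what makes the check-then-open sequence safe against the path being swapped in between.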
diff --git a/storage/myisam/mi_static.c b/storage/myisam/mi_static.c
index d77f4f6b8e2..49019fb861c 100644
--- a/storage/myisam/mi_static.c
+++ b/storage/myisam/mi_static.c
@@ -42,14 +42,6 @@ ulong myisam_data_pointer_size=4;
ulonglong myisam_mmap_size= SIZE_T_MAX, myisam_mmap_used= 0;
my_bool (*mi_killed)(MI_INFO *)= mi_killed_standalone;
-static int always_valid(const char *filename __attribute__((unused)))
-{
- return 0;
-}
-
-int (*myisam_test_invalid_symlink)(const char *filename)= always_valid;
-
-
/*
read_vec[] is used for converting between P_READ_KEY.. and SEARCH_
Position is , == , >= , <= , > , <
diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index 7835ab83531..edbe235e190 100644
--- a/storage/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
@@ -1047,7 +1047,7 @@ static int myisamchk(HA_CHECK *param, char * filename)
MYF(MY_WME)); /* Close new file */
error|=change_to_newfile(filename, MI_NAME_DEXT, DATA_TMP_EXT,
0, MYF(0));
- if (mi_open_datafile(info,info->s, NULL, -1))
+ if (mi_open_datafile(info, info->s))
error=1;
param->out_flag&= ~O_NEW_DATA; /* We are using new datafile */
param->read_cache.file=info->dfile;
diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index 336f1170d29..9d94a26d30c 100644
--- a/storage/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
@@ -713,8 +713,7 @@ void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
my_bool all_keys);
extern MI_INFO *test_if_reopen(char *filename);
my_bool check_table_is_closed(const char *name, const char *where);
-int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, const char *orn_name,
- File file_to_dup);
+int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share);
int mi_open_keyfile(MYISAM_SHARE *share);
void mi_setup_functions(register MYISAM_SHARE *share);
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index 50147575b94..041adc51abc 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -1,4 +1,4 @@
-SET(TOKUDB_VERSION 5.6.34-79.1)
+SET(TOKUDB_VERSION 5.6.35-80.0)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc
index 30a8710d7aa..ad9ecb1d074 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.cc
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc
@@ -651,10 +651,8 @@ void toku_ftnode_clone_callback(void *value_data,
// set new pair attr if necessary
if (node->height == 0) {
*new_attr = make_ftnode_pair_attr(node);
- for (int i = 0; i < node->n_children; i++) {
- BLB(node, i)->logical_rows_delta = 0;
- BLB(cloned_node, i)->logical_rows_delta = 0;
- }
+ node->logical_rows_delta = 0;
+ cloned_node->logical_rows_delta = 0;
} else {
new_attr->is_valid = false;
}
@@ -702,6 +700,10 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
if (ftnode->height == 0) {
FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF, 1);
FT_STATUS_INC(FT_FULL_EVICTIONS_LEAF_BYTES, node_size);
+ if (!ftnode->dirty) {
+ toku_ft_adjust_logical_row_count(
+ ft, -ftnode->logical_rows_delta);
+ }
} else {
FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF, 1);
FT_STATUS_INC(FT_FULL_EVICTIONS_NONLEAF_BYTES, node_size);
@@ -714,11 +716,12 @@ void toku_ftnode_flush_callback(CACHEFILE UU(cachefile),
BASEMENTNODE bn = BLB(ftnode, i);
toku_ft_decrease_stats(&ft->in_memory_stats,
bn->stat64_delta);
- if (!ftnode->dirty)
- toku_ft_adjust_logical_row_count(
- ft, -bn->logical_rows_delta);
}
}
+ if (!ftnode->dirty) {
+ toku_ft_adjust_logical_row_count(
+ ft, -ftnode->logical_rows_delta);
+ }
}
}
toku_ftnode_free(&ftnode);
@@ -944,8 +947,6 @@ int toku_ftnode_pe_callback(void *ftnode_pv,
basements_to_destroy[num_basements_to_destroy++] = bn;
toku_ft_decrease_stats(&ft->in_memory_stats,
bn->stat64_delta);
- toku_ft_adjust_logical_row_count(ft,
- -bn->logical_rows_delta);
set_BNULL(node, i);
BP_STATE(node, i) = PT_ON_DISK;
num_partial_evictions++;
@@ -2652,7 +2653,7 @@ static std::unique_ptr<char[], decltype(&toku_free)> toku_file_get_parent_dir(
return result;
}
-static bool toku_create_subdirs_if_needed(const char *path) {
+bool toku_create_subdirs_if_needed(const char *path) {
static const mode_t dir_mode = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP |
S_IWGRP | S_IXGRP | S_IROTH | S_IXOTH;
@@ -4563,6 +4564,8 @@ int toku_ft_rename_iname(DB_TXN *txn,
bs_new_name);
}
+ if (!toku_create_subdirs_if_needed(new_iname_full.get()))
+ return get_error_errno();
r = toku_os_rename(old_iname_full.get(), new_iname_full.get());
if (r != 0)
return r;
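
toku_create_subdirs_if_needed() is no longer static and is now called before toku_os_rename() in toku_ft_rename_iname() (and, further below, in the recovery and rollback paths), so renaming a dictionary into a directory that does not exist yet creates the missing directories first. A compact C++17 sketch of that "ensure parent directories, then rename" step; std::filesystem is used here only for brevity and is not what PerconaFT itself uses:

#include <filesystem>
#include <system_error>

namespace fs = std::filesystem;

// Create every missing parent directory of 'new_name', then rename.
// Returns true on success, false otherwise.
bool rename_creating_dirs(const fs::path &old_name, const fs::path &new_name)
{
  std::error_code ec;
  if (!new_name.parent_path().empty())
  {
    fs::create_directories(new_name.parent_path(), ec);  // ok if it exists
    if (ec)
      return false;
  }
  fs::rename(old_name, new_name, ec);
  return !ec;
}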
diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.h b/storage/tokudb/PerconaFT/ft/ft-ops.h
index 70cf045d43c..df8ffe287df 100644
--- a/storage/tokudb/PerconaFT/ft/ft-ops.h
+++ b/storage/tokudb/PerconaFT/ft/ft-ops.h
@@ -288,3 +288,8 @@ void toku_ft_set_direct_io(bool direct_io_on);
void toku_ft_set_compress_buffers_before_eviction(bool compress_buffers);
void toku_note_deserialized_basement_node(bool fixed_key_size);
+
+// Creates all directories for the path if necessary,
+// returns true if all dirs are created successfully or
+// all dirs exist, false otherwise.
+bool toku_create_subdirs_if_needed(const char* path);
diff --git a/storage/tokudb/PerconaFT/ft/logger/recover.cc b/storage/tokudb/PerconaFT/ft/logger/recover.cc
index a9c30c0e37a..9eaa56bdc53 100644
--- a/storage/tokudb/PerconaFT/ft/logger/recover.cc
+++ b/storage/tokudb/PerconaFT/ft/logger/recover.cc
@@ -987,7 +987,8 @@ static int toku_recover_frename(struct logtype_frename *l, RECOVER_ENV renv) {
return 1;
if (old_exist && !new_exist &&
- (toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
+ (!toku_create_subdirs_if_needed(new_iname_full.get()) ||
+ toku_os_rename(old_iname_full.get(), new_iname_full.get()) == -1 ||
toku_fsync_directory(old_iname_full.get()) == -1 ||
toku_fsync_directory(new_iname_full.get()) == -1))
return 1;
diff --git a/storage/tokudb/PerconaFT/ft/node.cc b/storage/tokudb/PerconaFT/ft/node.cc
index 12e5fda226e..07309ff7f94 100644
--- a/storage/tokudb/PerconaFT/ft/node.cc
+++ b/storage/tokudb/PerconaFT/ft/node.cc
@@ -386,7 +386,8 @@ static void bnc_apply_messages_to_basement_node(
const pivot_bounds &
bounds, // contains pivot key bounds of this basement node
txn_gc_info *gc_info,
- bool *msgs_applied) {
+ bool *msgs_applied,
+ int64_t* logical_rows_delta) {
int r;
NONLEAF_CHILDINFO bnc = BNC(ancestor, childnum);
@@ -394,7 +395,6 @@ static void bnc_apply_messages_to_basement_node(
// apply messages from this buffer
STAT64INFO_S stats_delta = {0, 0};
uint64_t workdone_this_ancestor = 0;
- int64_t logical_rows_delta = 0;
uint32_t stale_lbi, stale_ube;
if (!bn->stale_ancestor_messages_applied) {
@@ -470,7 +470,7 @@ static void bnc_apply_messages_to_basement_node(
gc_info,
&workdone_this_ancestor,
&stats_delta,
- &logical_rows_delta);
+ logical_rows_delta);
}
} else if (stale_lbi == stale_ube) {
// No stale messages to apply, we just apply fresh messages, and mark
@@ -482,7 +482,7 @@ static void bnc_apply_messages_to_basement_node(
.gc_info = gc_info,
.workdone = &workdone_this_ancestor,
.stats_to_update = &stats_delta,
- .logical_rows_delta = &logical_rows_delta};
+ .logical_rows_delta = logical_rows_delta};
if (fresh_ube - fresh_lbi > 0)
*msgs_applied = true;
r = bnc->fresh_message_tree
@@ -503,7 +503,7 @@ static void bnc_apply_messages_to_basement_node(
.gc_info = gc_info,
.workdone = &workdone_this_ancestor,
.stats_to_update = &stats_delta,
- .logical_rows_delta = &logical_rows_delta};
+ .logical_rows_delta = logical_rows_delta};
r = bnc->stale_message_tree
.iterate_on_range<struct iterate_do_bn_apply_msg_extra,
@@ -521,8 +521,6 @@ static void bnc_apply_messages_to_basement_node(
if (stats_delta.numbytes || stats_delta.numrows) {
toku_ft_update_stats(&t->ft->in_memory_stats, stats_delta);
}
- toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
- bn->logical_rows_delta += logical_rows_delta;
}
static void
@@ -536,6 +534,7 @@ apply_ancestors_messages_to_bn(
bool* msgs_applied
)
{
+ int64_t logical_rows_delta = 0;
BASEMENTNODE curr_bn = BLB(node, childnum);
const pivot_bounds curr_bounds = bounds.next_bounds(node, childnum);
for (ANCESTORS curr_ancestors = ancestors; curr_ancestors; curr_ancestors = curr_ancestors->next) {
@@ -548,13 +547,16 @@ apply_ancestors_messages_to_bn(
curr_ancestors->childnum,
curr_bounds,
gc_info,
- msgs_applied
+ msgs_applied,
+ &logical_rows_delta
);
// We don't want to check this ancestor node again if the
// next time we query it, the msn hasn't changed.
curr_bn->max_msn_applied = curr_ancestors->node->max_msn_applied_to_node_on_disk;
}
}
+ toku_ft_adjust_logical_row_count(t->ft, logical_rows_delta);
+ node->logical_rows_delta += logical_rows_delta;
// At this point, we know all the stale messages above this
// basement node have been applied, and any new messages will be
// fresh, so we don't need to look at stale messages for this
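
These node.cc hunks move logical_rows_delta from each basement node up to the ftnode: bnc_apply_messages_to_basement_node() now reports its delta through an out-parameter, apply_ancestors_messages_to_bn() sums the deltas over all ancestors and applies them to the tree-wide logical row count once, and the clone/flush callbacks earlier in this diff use the per-node total to back the count out when a non-dirty node is evicted. A small sketch of that accumulate-then-apply-once pattern (all names here are invented stand-ins, not the PerconaFT API):

#include <cstdint>
#include <cstdio>

struct TreeStats { int64_t logical_rows = 0; };
struct Node      { int64_t logical_rows_delta = 0; };

// Pretend "applying messages" to one child produces some row-count change.
void apply_messages_to_child(int child, int64_t *delta)
{
  *delta += (child % 2 == 0) ? +3 : -1;    // toy values
}

void apply_all(TreeStats &tree, Node &node, int n_children)
{
  int64_t delta = 0;
  for (int i = 0; i < n_children; i++)
    apply_messages_to_child(i, &delta);    // children only accumulate
  tree.logical_rows += delta;              // adjust the tree-wide count once
  node.logical_rows_delta += delta;        // kept so eviction can undo it
}

int main()
{
  TreeStats tree;
  Node node;
  apply_all(tree, node, 4);
  printf("%lld %lld\n", (long long) tree.logical_rows,
         (long long) node.logical_rows_delta);
  return 0;
}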
diff --git a/storage/tokudb/PerconaFT/ft/node.h b/storage/tokudb/PerconaFT/ft/node.h
index 52eefec0936..db189e36d59 100644
--- a/storage/tokudb/PerconaFT/ft/node.h
+++ b/storage/tokudb/PerconaFT/ft/node.h
@@ -157,36 +157,49 @@ private:
// TODO: class me up
struct ftnode {
- MSN max_msn_applied_to_node_on_disk; // max_msn_applied that will be written to disk
+ // max_msn_applied that will be written to disk
+ MSN max_msn_applied_to_node_on_disk;
unsigned int flags;
- BLOCKNUM blocknum; // Which block number is this node?
- int layout_version; // What version of the data structure?
- int layout_version_original; // different (<) from layout_version if upgraded from a previous version (useful for debugging)
- int layout_version_read_from_disk; // transient, not serialized to disk, (useful for debugging)
- uint32_t build_id; // build_id (svn rev number) of software that wrote this node to disk
- int height; /* height is always >= 0. 0 for leaf, >0 for nonleaf. */
- int dirty;
+ // Which block number is this node?
+ BLOCKNUM blocknum;
+ // What version of the data structure?
+ int layout_version;
+ // different (<) from layout_version if upgraded from a previous version
+ // (useful for debugging)
+ int layout_version_original;
+ // transient, not serialized to disk, (useful for debugging)
+ int layout_version_read_from_disk;
+ // build_id (svn rev number) of software that wrote this node to disk
+ uint32_t build_id;
+ // height is always >= 0. 0 for leaf, >0 for nonleaf.
+ int height;
+ int dirty;
uint32_t fullhash;
+    // current count of rows added or removed as a result of message
+    // application to this node as a basement node; irrelevant for internal
+    // nodes, gets reset when the node is undirtied. Used to back out the
+    // tree-scoped logical row count (LRC) if the node is evicted but not persisted
+ int64_t logical_rows_delta;
- // for internal nodes, if n_children==fanout+1 then the tree needs to be rebalanced.
- // for leaf nodes, represents number of basement nodes
+ // for internal nodes, if n_children==fanout+1 then the tree needs to be
+ // rebalanced. for leaf nodes, represents number of basement nodes
int n_children;
ftnode_pivot_keys pivotkeys;
- // What's the oldest referenced xid that this node knows about? The real oldest
- // referenced xid might be younger, but this is our best estimate. We use it
- // as a heuristic to transition provisional mvcc entries from provisional to
- // committed (from implicity committed to really committed).
+ // What's the oldest referenced xid that this node knows about? The real
+ // oldest referenced xid might be younger, but this is our best estimate.
+ // We use it as a heuristic to transition provisional mvcc entries from
+    // provisional to committed (from implicitly committed to really committed).
//
- // A better heuristic would be the oldest live txnid, but we use this since it
- // still works well most of the time, and its readily available on the inject
- // code path.
+ // A better heuristic would be the oldest live txnid, but we use this since
+    // it still works well most of the time, and it's readily available on the
+ // inject code path.
TXNID oldest_referenced_xid_known;
// array of size n_children, consisting of ftnode partitions
- // each one is associated with a child
- // for internal nodes, the ith partition corresponds to the ith message buffer
- // for leaf nodes, the ith partition corresponds to the ith basement node
+    // each one is associated with a child; for internal nodes, the ith
+    // partition corresponds to the ith message buffer; for leaf nodes, the
+    // ith partition corresponds to the ith basement node
struct ftnode_partition *bp;
struct ctpair *ct_pair;
};
@@ -199,7 +212,6 @@ struct ftnode_leaf_basement_node {
MSN max_msn_applied; // max message sequence number applied
bool stale_ancestor_messages_applied;
STAT64INFO_S stat64_delta; // change in stat64 counters since basement was last written to disk
- int64_t logical_rows_delta;
};
typedef struct ftnode_leaf_basement_node *BASEMENTNODE;
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
index 5914f8a1050..56876b474d4 100644
--- a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
+++ b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc
@@ -996,7 +996,6 @@ BASEMENTNODE toku_clone_bn(BASEMENTNODE orig_bn) {
bn->seqinsert = orig_bn->seqinsert;
bn->stale_ancestor_messages_applied = orig_bn->stale_ancestor_messages_applied;
bn->stat64_delta = orig_bn->stat64_delta;
- bn->logical_rows_delta = orig_bn->logical_rows_delta;
bn->data_buffer.clone(&orig_bn->data_buffer);
return bn;
}
@@ -1007,7 +1006,6 @@ BASEMENTNODE toku_create_empty_bn_no_buffer(void) {
bn->seqinsert = 0;
bn->stale_ancestor_messages_applied = false;
bn->stat64_delta = ZEROSTATS;
- bn->logical_rows_delta = 0;
bn->data_buffer.init_zero();
return bn;
}
@@ -1432,6 +1430,7 @@ static FTNODE alloc_ftnode_for_deserialize(uint32_t fullhash, BLOCKNUM blocknum)
node->fullhash = fullhash;
node->blocknum = blocknum;
node->dirty = 0;
+ node->logical_rows_delta = 0;
node->bp = nullptr;
node->oldest_referenced_xid_known = TXNID_NONE;
return node;
diff --git a/storage/tokudb/PerconaFT/ft/txn/roll.cc b/storage/tokudb/PerconaFT/ft/txn/roll.cc
index 9f3977743a0..4f374d62173 100644
--- a/storage/tokudb/PerconaFT/ft/txn/roll.cc
+++ b/storage/tokudb/PerconaFT/ft/txn/roll.cc
@@ -227,7 +227,8 @@ int toku_rollback_frename(BYTESTRING old_iname,
return 1;
if (!old_exist && new_exist &&
- (toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
+ (!toku_create_subdirs_if_needed(old_iname_full.get()) ||
+ toku_os_rename(new_iname_full.get(), old_iname_full.get()) == -1 ||
toku_fsync_directory(new_iname_full.get()) == -1 ||
toku_fsync_directory(old_iname_full.get()) == -1))
return 1;
diff --git a/storage/tokudb/PerconaFT/util/dmt.h b/storage/tokudb/PerconaFT/util/dmt.h
index 71cde8814ab..99be296d0e9 100644
--- a/storage/tokudb/PerconaFT/util/dmt.h
+++ b/storage/tokudb/PerconaFT/util/dmt.h
@@ -589,7 +589,6 @@ private:
void convert_from_tree_to_array(void);
- __attribute__((nonnull(2,5)))
void delete_internal(subtree *const subtreep, const uint32_t idx, subtree *const subtree_replace, subtree **const rebalance_subtree);
template<typename iterate_extra_t,
@@ -627,16 +626,12 @@ private:
__attribute__((nonnull))
void rebalance(subtree *const subtree);
- __attribute__((nonnull(3)))
static void copyout(uint32_t *const outlen, dmtdata_t *const out, const dmt_node *const n);
- __attribute__((nonnull(3)))
static void copyout(uint32_t *const outlen, dmtdata_t **const out, dmt_node *const n);
- __attribute__((nonnull(4)))
static void copyout(uint32_t *const outlen, dmtdata_t *const out, const uint32_t len, const dmtdata_t *const stored_value_ptr);
- __attribute__((nonnull(4)))
static void copyout(uint32_t *const outlen, dmtdata_t **const out, const uint32_t len, dmtdata_t *const stored_value_ptr);
template<typename dmtcmp_t,
diff --git a/storage/tokudb/PerconaFT/util/omt.h b/storage/tokudb/PerconaFT/util/omt.h
index 799ed0eae7c..c7ed2ca546f 100644
--- a/storage/tokudb/PerconaFT/util/omt.h
+++ b/storage/tokudb/PerconaFT/util/omt.h
@@ -284,7 +284,6 @@ public:
* By taking ownership of the array, we save a malloc and memcpy,
* and possibly a free (if the caller is done with the array).
*/
- __attribute__((nonnull))
void create_steal_sorted_array(omtdata_t **const values, const uint32_t numvalues, const uint32_t new_capacity);
/**
@@ -667,7 +666,6 @@ private:
void set_at_internal(const subtree &subtree, const omtdata_t &value, const uint32_t idx);
- __attribute__((nonnull(2,5)))
void delete_internal(subtree *const subtreep, const uint32_t idx, omt_node *const copyn, subtree **const rebalance_subtree);
template<typename iterate_extra_t,
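
The dmt.h and omt.h hunks drop __attribute__((nonnull(...))) from parameters that may legitimately be NULL (optional out-parameters and subtree pointers). With the attribute present, GCC and Clang may assume the argument is non-null and delete any null check inside the function, so annotating an optional parameter this way is actively harmful. A minimal illustration; the function names are made up, and the effect is easiest to see in the generated code at -O2:

#include <cstdio>

// With the attribute, the compiler may assume 'out' != NULL and
// fold the guard below to 'false', silently skipping the early return.
__attribute__((nonnull(1)))
void copy_out_bad(int *out, int value)
{
  if (out == nullptr)        // dead code under the nonnull assumption
    return;
  *out = value;
}

// Without the attribute the guard is honoured, so NULL means "don't copy".
void copy_out_ok(int *out, int value)
{
  if (out == nullptr)
    return;
  *out = value;
}

int main()
{
  int x = 0;
  copy_out_bad(&x, 5);       // fine: argument is non-null
  copy_out_ok(nullptr, 7);   // safe: the guard runs
  copy_out_ok(&x, 7);
  printf("%d\n", x);
  return 0;
}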
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index 54dff2a4b3b..53dc2d20bb1 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -29,6 +29,7 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "tokudb_status.h"
#include "tokudb_card.h"
#include "ha_tokudb.h"
+#include "sql_db.h"
#if TOKU_INCLUDE_EXTENDED_KEYS
@@ -6122,8 +6123,6 @@ int ha_tokudb::info(uint flag) {
stats.deleted = 0;
if (!(flag & HA_STATUS_NO_LOCK)) {
uint64_t num_rows = 0;
- TOKU_DB_FRAGMENTATION_S frag_info;
- memset(&frag_info, 0, sizeof frag_info);
error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd());
if (error) {
@@ -6140,11 +6139,6 @@ int ha_tokudb::info(uint flag) {
} else {
goto cleanup;
}
- error = share->file->get_fragmentation(share->file, &frag_info);
- if (error) {
- goto cleanup;
- }
- stats.delete_length = frag_info.unused_bytes;
DB_BTREE_STAT64 dict_stats;
error = share->file->stat64(share->file, txn, &dict_stats);
@@ -6156,6 +6150,7 @@ int ha_tokudb::info(uint flag) {
stats.update_time = dict_stats.bt_modify_time_sec;
stats.check_time = dict_stats.bt_verify_time_sec;
stats.data_file_length = dict_stats.bt_dsize;
+ stats.delete_length = dict_stats.bt_fsize - dict_stats.bt_dsize;
if (hidden_primary_key) {
//
// in this case, we have a hidden primary key, do not
@@ -6191,30 +6186,21 @@ int ha_tokudb::info(uint flag) {
//
// this solution is much simpler than trying to maintain an
// accurate number of valid keys at the handlerton layer.
- uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
+ uint curr_num_DBs =
+ table->s->keys + tokudb_test(hidden_primary_key);
for (uint i = 0; i < curr_num_DBs; i++) {
// skip the primary key, skip dropped indexes
if (i == primary_key || share->key_file[i] == NULL) {
continue;
}
- error =
- share->key_file[i]->stat64(
- share->key_file[i],
- txn,
- &dict_stats);
+ error = share->key_file[i]->stat64(
+ share->key_file[i], txn, &dict_stats);
if (error) {
goto cleanup;
}
stats.index_file_length += dict_stats.bt_dsize;
-
- error =
- share->file->get_fragmentation(
- share->file,
- &frag_info);
- if (error) {
- goto cleanup;
- }
- stats.delete_length += frag_info.unused_bytes;
+ stats.delete_length +=
+ dict_stats.bt_fsize - dict_stats.bt_dsize;
}
}
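
ha_tokudb::info() stops calling get_fragmentation() and instead derives delete_length from the DB_BTREE_STAT64 numbers it already fetches: on-disk file size minus live data size approximates the reclaimable bytes, for the main dictionary and for each secondary index. A tiny sketch of that bookkeeping; the underflow guard is an added precaution, not something the diff shows:

#include <cstdint>

// Approximate "deleted"/unused bytes from tree-level stats:
// total on-disk file size minus the bytes holding live data.
uint64_t delete_length_from_stats(uint64_t bt_fsize, uint64_t bt_dsize)
{
  return bt_fsize >= bt_dsize ? bt_fsize - bt_dsize : 0;
}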
@@ -7651,6 +7637,27 @@ int ha_tokudb::delete_table(const char *name) {
TOKUDB_HANDLER_DBUG_RETURN(error);
}
+static bool tokudb_check_db_dir_exist_from_table_name(const char *table_name) {
+ DBUG_ASSERT(table_name);
+ bool mysql_dir_exists;
+ char db_name[FN_REFLEN];
+ const char *db_name_begin = strchr(table_name, FN_LIBCHAR);
+ const char *db_name_end = strrchr(table_name, FN_LIBCHAR);
+ DBUG_ASSERT(db_name_begin);
+ DBUG_ASSERT(db_name_end);
+ DBUG_ASSERT(db_name_begin != db_name_end);
+
+ ++db_name_begin;
+ size_t db_name_size = db_name_end - db_name_begin;
+
+ DBUG_ASSERT(db_name_size < FN_REFLEN);
+
+ memcpy(db_name, db_name_begin, db_name_size);
+ db_name[db_name_size] = '\0';
+ mysql_dir_exists = (check_db_dir_existence(db_name) == 0);
+
+ return mysql_dir_exists;
+}
//
// renames table from "from" to "to"
@@ -7673,15 +7680,33 @@ int ha_tokudb::rename_table(const char *from, const char *to) {
TOKUDB_SHARE::drop_share(share);
}
int error;
- error = delete_or_rename_table(from, to, false);
- if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
- error == DB_LOCK_NOTGRANTED) {
+ bool to_db_dir_exist = tokudb_check_db_dir_exist_from_table_name(to);
+ if (!to_db_dir_exist) {
sql_print_error(
- "Could not rename table from %s to %s because another transaction "
- "has accessed the table. To rename the table, make sure no "
- "transactions touch the table.",
+ "Could not rename table from %s to %s because "
+ "destination db does not exist",
from,
to);
+#ifndef __WIN__
+ /* Small hack. tokudb_check_db_dir_exist_from_table_name calls
+ * my_access, which sets my_errno on Windows, but doesn't on
+ * unix. Set it for unix too.
+ */
+ my_errno= errno;
+#endif
+ error= my_errno;
+ }
+ else {
+ error = delete_or_rename_table(from, to, false);
+ if (TOKUDB_LIKELY(TOKUDB_DEBUG_FLAGS(TOKUDB_DEBUG_HIDE_DDL_LOCK_ERRORS) == 0) &&
+ error == DB_LOCK_NOTGRANTED) {
+ sql_print_error(
+ "Could not rename table from %s to %s because another transaction "
+ "has accessed the table. To rename the table, make sure no "
+ "transactions touch the table.",
+ from,
+ to);
+ }
}
TOKUDB_HANDLER_DBUG_RETURN(error);
}
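
tokudb_check_db_dir_exist_from_table_name() takes a table path of the form "./db/table" (FN_LIBCHAR is assumed to be '/' here), extracts the text between the first and last separators as the database name, and asks the server whether that schema directory exists before attempting the rename, so renaming into a non-existent schema fails with a clear message instead of leaving orphaned dictionary files. A stand-alone sketch of the same parsing and check, with stat() standing in for check_db_dir_existence():

#include <cstring>
#include <cstdio>
#include <sys/stat.h>

// Given "./db_name/table_name", return true when "./db_name" is a directory.
bool db_dir_exists_for_table(const char *table_path)
{
  const char *begin = strchr(table_path, '/');   // first separator
  const char *end   = strrchr(table_path, '/');  // last separator
  if (!begin || !end || begin == end)
    return false;                                // no "<db>/<table>" structure

  char db_dir[512];
  size_t len = (size_t)(end - table_path);       // keep the leading "./"
  if (len >= sizeof(db_dir))
    return false;
  memcpy(db_dir, table_path, len);
  db_dir[len] = '\0';

  struct stat st;
  return stat(db_dir, &st) == 0 && S_ISDIR(st.st_mode);
}

int main()
{
  printf("%d\n", db_dir_exists_for_table("./test/t1"));  // 1 if ./test exists
  return 0;
}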
diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h
index 3d7a3a7fa05..4a7e395d0d1 100644
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@ -816,6 +816,8 @@ public:
int index_first(uchar * buf);
int index_last(uchar * buf);
+ bool has_gap_locks() const { return true; }
+
int rnd_init(bool scan);
int rnd_end();
int rnd_next(uchar * buf);
diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result b/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result
new file mode 100644
index 00000000000..74148bd4e74
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result
@@ -0,0 +1,46 @@
+SET GLOBAL tokudb_dir_per_db=true;
+######
+# Tokudb and mysql data dirs are the same, rename to existent db
+###
+CREATE DATABASE new_db;
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+ALTER TABLE test.t1 RENAME new_db.t1;
+The content of "test" directory:
+The content of "new_db" directory:
+db.opt
+t1.frm
+t1_main_id.tokudb
+t1_status_id.tokudb
+DROP DATABASE new_db;
+######
+# Tokudb and mysql data dirs are the same, rename to nonexistent db
+###
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+CALL mtr.add_suppression("because destination db does not exist");
+ALTER TABLE test.t1 RENAME foo.t1;
+ERROR HY000: Error on rename of './test/t1' to './foo/t1' (errno: 2 "No such file or directory")
+DROP TABLE t1;
+SELECT @@tokudb_data_dir;
+@@tokudb_data_dir
+NULL
+SELECT @@tokudb_dir_per_db;
+@@tokudb_dir_per_db
+0
+######
+# Tokudb and mysql data dirs are different, rename to existent db
+###
+CREATE DATABASE new_db;
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+ALTER TABLE test.t1 RENAME new_db.t1;
+The content of "test" directory:
+The content of "new_db" directory:
+DROP DATABASE new_db;
+######
+# Tokudb and mysql data dirs are different, rename to nonexistent db
+###
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+CALL mtr.add_suppression("because destination db does not exist");
+ALTER TABLE test.t1 RENAME foo.t1;
+ERROR HY000: Error on rename of './test/t1' to './foo/t1' (errno: 2 "No such file or directory")
+DROP TABLE t1;
+SET GLOBAL tokudb_dir_per_db=default;
diff --git a/storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result b/storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result
new file mode 100644
index 00000000000..41e294f7d8d
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/gap_lock_error.result
@@ -0,0 +1,469 @@
+CREATE TABLE gap1 (id1 INT, id2 INT, id3 INT, c1 INT, value INT,
+PRIMARY KEY (id1, id2, id3),
+INDEX i (c1)) ENGINE=tokudb;
+CREATE TABLE gap2 like gap1;
+CREATE TABLE gap3 (id INT, value INT,
+PRIMARY KEY (id),
+UNIQUE KEY ui(value)) ENGINE=tokudb;
+CREATE TABLE gap4 (id INT, value INT,
+PRIMARY KEY (id)) ENGINE=tokudb
+PARTITION BY HASH(id) PARTITIONS 2;
+insert into gap3 values (1,1), (2,2),(3,3),(4,4),(5,5);
+insert into gap4 values (1,1), (2,2),(3,3),(4,4),(5,5);
+set session autocommit=0;
+select * from gap1 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 for update;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 for update;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 for update;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 for update;
+id value
+1 1
+select * from gap4 where id=1 for update;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) for update;
+id value
+1 1
+2 2
+3 3
+select * from gap4 for update;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 for update;
+id value
+3 3
+4 4
+5 5
+set session autocommit=1;
+select * from gap1 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 for update;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 for update;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 for update;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 for update;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) for update;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 for update;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 for update;
+id value
+1 1
+select * from gap4 where id=1 for update;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) for update;
+id value
+1 1
+2 2
+3 3
+select * from gap4 for update;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 for update;
+id value
+3 3
+4 4
+5 5
+set session autocommit=0;
+select * from gap1 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 lock in share mode;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 lock in share mode;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 lock in share mode;
+id value
+1 1
+select * from gap4 where id=1 lock in share mode;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) lock in share mode;
+id value
+1 1
+2 2
+3 3
+select * from gap4 lock in share mode;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 lock in share mode;
+id value
+3 3
+4 4
+5 5
+set session autocommit=1;
+select * from gap1 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 lock in share mode;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 lock in share mode;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 lock in share mode;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 lock in share mode;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) lock in share mode;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 lock in share mode;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 lock in share mode;
+id value
+1 1
+select * from gap4 where id=1 lock in share mode;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) lock in share mode;
+id value
+1 1
+2 2
+3 3
+select * from gap4 lock in share mode;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 lock in share mode;
+id value
+3 3
+4 4
+5 5
+set session autocommit=0;
+select * from gap1 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 ;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 ;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 ;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 ;
+id value
+1 1
+select * from gap4 where id=1 ;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) ;
+id value
+1 1
+2 2
+3 3
+select * from gap4 ;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 ;
+id value
+3 3
+4 4
+5 5
+set session autocommit=1;
+select * from gap1 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where value != 100 limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 where id1=1 ;
+id1 id2 id3 c1 value
+1 0 2 2 2
+1 0 3 3 3
+select * from gap1 where id1=1 and id2= 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3 != 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 and id3
+between 1 and 3 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 asc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2= 1 order by id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+select * from gap1 order by id1 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 asc, id2 asc, id3 asc limit 1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap1 order by id1 desc limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 order by id1 desc, id2 desc, id3 desc
+limit 1 ;
+id1 id2 id3 c1 value
+500 100 1000 1000 1000
+select * from gap1 force index(i) where c1=1 ;
+id1 id2 id3 c1 value
+0 0 1 1 1
+select * from gap3 force index(ui) where value=1 ;
+id value
+1 1
+select * from gap1 where id1=1 and id2=1 and id3=1 ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3 in (1, 2, 3) ;
+id1 id2 id3 c1 value
+select * from gap1 where id1=1 and id2=1 and id3=1 and value=1
+order by c1 ;
+id1 id2 id3 c1 value
+select * from gap3 where id=1 ;
+id value
+1 1
+select * from gap4 where id=1 ;
+id value
+1 1
+select * from gap4 where id in (1, 2, 3) ;
+id value
+1 1
+2 2
+3 3
+select * from gap4 ;
+id value
+2 2
+4 4
+1 1
+3 3
+5 5
+select * from gap4 where id between 3 and 7 ;
+id value
+3 3
+4 4
+5 5
+set session autocommit=0;
+insert into gap1 (id1, id2, id3) values (-1,-1,-1);
+insert into gap1 (id1, id2, id3) values (-1,-1,-1)
+on duplicate key update value=100;
+update gap1 set value=100 where id1=1;
+update gap1 set value=100 where id1=1 and id2=1 and id3=1;
+delete from gap1 where id1=2;
+delete from gap1 where id1=-1 and id2=-1 and id3=-1;
+commit;
+set session autocommit=1;
+insert into gap1 (id1, id2, id3) values (-1,-1,-1);
+insert into gap1 (id1, id2, id3) values (-1,-1,-1)
+on duplicate key update value=100;
+update gap1 set value=100 where id1=1;
+update gap1 set value=100 where id1=1 and id2=1 and id3=1;
+delete from gap1 where id1=2;
+delete from gap1 where id1=-1 and id2=-1 and id3=-1;
+commit;
+drop table gap1, gap2, gap3, gap4;
diff --git a/storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result
new file mode 100644
index 00000000000..089d1d2b136
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/r/percona_kill_idle_trx_tokudb.result
@@ -0,0 +1,43 @@
+SET default_storage_engine=TokuDB;
+#
+# Test kill_idle_transaction_timeout feature with TokuDB
+#
+CREATE TABLE t1 (a INT);
+SET GLOBAL kill_idle_transaction= 1;
+BEGIN;
+INSERT INTO t1 VALUES (1),(2);
+COMMIT;
+SELECT * FROM t1;
+a
+1
+2
+BEGIN;
+INSERT INTO t1 VALUES (3);
+# Current connection idle transaction killed, reconnecting
+SELECT * FROM t1;
+a
+1
+2
+#
+# Test that row locks are released on idle transaction kill
+#
+SET GLOBAL kill_idle_transaction= 2;
+# Take row locks in connection conn1
+BEGIN;
+SELECT * FROM t1 FOR UPDATE;
+a
+1
+2
+# Take row locks in connection default
+UPDATE t1 SET a=4;
+SELECT * FROM t1;
+a
+4
+4
+# Show that connection conn1 has been killed
+SELECT * FROM t1;
+ERROR HY000: MySQL server has gone away
+# connection default
+# Cleanup
+DROP TABLE t1;
+SET GLOBAL kill_idle_transaction= saved_kill_idle_transaction;
diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test b/storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test
new file mode 100644
index 00000000000..17fe0188a6e
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/dir_per_db_rename_to_nonexisting_schema.test
@@ -0,0 +1,64 @@
+--source include/have_tokudb.inc
+
+SET GLOBAL tokudb_dir_per_db=true;
+--let DATADIR=`SELECT @@datadir`
+
+--echo ######
+--echo # Tokudb and mysql data dirs are the same, rename to existent db
+--echo ###
+CREATE DATABASE new_db;
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+ALTER TABLE test.t1 RENAME new_db.t1;
+--echo The content of "test" directory:
+--source include/table_files_replace_pattern.inc
+--sorted_result
+--list_files $DATADIR/test
+--echo The content of "new_db" directory:
+--source include/table_files_replace_pattern.inc
+--sorted_result
+--list_files $DATADIR/new_db
+DROP DATABASE new_db;
+
+--echo ######
+--echo # Tokudb and mysql data dirs are the same, rename to nonexistent db
+--echo ###
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+CALL mtr.add_suppression("because destination db does not exist");
+--error ER_ERROR_ON_RENAME
+ALTER TABLE test.t1 RENAME foo.t1;
+DROP TABLE t1;
+
+--let $custom_tokudb_data_dir=$MYSQL_TMP_DIR/custom_tokudb_data_dir
+--mkdir $custom_tokudb_data_dir
+--replace_result $custom_tokudb_data_dir CUSTOM_TOKUDB_DATA_DIR
+--source include/restart_mysqld.inc
+
+--replace_result $custom_tokudb_data_dir CUSTOM_TOKUDB_DATA_DIR
+SELECT @@tokudb_data_dir;
+SELECT @@tokudb_dir_per_db;
+
+--echo ######
+--echo # Tokudb and mysql data dirs are different, rename to existent db
+--echo ###
+CREATE DATABASE new_db;
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+ALTER TABLE test.t1 RENAME new_db.t1;
+--echo The content of "test" directory:
+--source include/table_files_replace_pattern.inc
+--sorted_result
+--echo The content of "new_db" directory:
+--source include/table_files_replace_pattern.inc
+--sorted_result
+DROP DATABASE new_db;
+
+--echo ######
+--echo # Tokudb and mysql data dirs are different, rename to nonexistent db
+--echo ###
+CREATE TABLE t1 (id INT AUTO_INCREMENT PRIMARY KEY NOT NULL) ENGINE=tokudb;
+CALL mtr.add_suppression("because destination db does not exist");
+--error ER_ERROR_ON_RENAME
+ALTER TABLE test.t1 RENAME foo.t1;
+DROP TABLE t1;
+
+SET GLOBAL tokudb_dir_per_db=default;
+
diff --git a/storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test b/storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test
new file mode 100644
index 00000000000..8c52cef9e27
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/gap_lock_error.test
@@ -0,0 +1,5 @@
+--source include/have_tokudb.inc
+
+let $engine=tokudb;
+let $expect_gap_lock_errors=0;
+--source include/gap_lock_error_all.inc
diff --git a/storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test b/storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test
new file mode 100644
index 00000000000..743fb9a55a7
--- /dev/null
+++ b/storage/tokudb/mysql-test/tokudb/t/percona_kill_idle_trx_tokudb.test
@@ -0,0 +1,5 @@
+--source include/have_tokudb.inc
+--skip MariaDB doesn't support kill_idle_trx variable for all SE
+
+SET default_storage_engine=TokuDB;
+--source include/percona_kill_idle_trx.inc
diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
index 0d80cf85a91..5d4cb245e27 100644
--- a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
+++ b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt
@@ -1 +1 @@
-$TOKUDB_OPT $TOKUDB_LOAD_ADD $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
+$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
diff --git a/storage/xtradb/btr/btr0btr.cc b/storage/xtradb/btr/btr0btr.cc
index bce81f95ead..417eeb2c367 100644
--- a/storage/xtradb/btr/btr0btr.cc
+++ b/storage/xtradb/btr/btr0btr.cc
@@ -3571,8 +3571,6 @@ btr_level_list_remove_func(
ulint prev_page_no;
ulint next_page_no;
- ut_ad(page != NULL);
- ut_ad(mtr != NULL);
ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX));
ut_ad(space == page_get_space_id(page));
/* Get the previous and next page numbers of page */
diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc
index afe2c181594..512b791f212 100644
--- a/storage/xtradb/btr/btr0cur.cc
+++ b/storage/xtradb/btr/btr0cur.cc
@@ -1843,7 +1843,7 @@ btr_cur_pessimistic_insert(
/*************************************************************//**
For an update, checks the locks and does the undo logging.
@return DB_SUCCESS, DB_WAIT_LOCK, or error number */
-UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,6,7)))
+UNIV_INLINE MY_ATTRIBUTE((warn_unused_result))
dberr_t
btr_cur_upd_lock_and_undo(
/*======================*/
@@ -2073,7 +2073,6 @@ btr_cur_update_alloc_zip_func(
const page_t* page = page_cur_get_page(cursor);
ut_ad(page_zip == page_cur_get_page_zip(cursor));
- ut_ad(page_zip);
ut_ad(!dict_index_is_ibuf(index));
ut_ad(rec_offs_validate(page_cur_get_rec(cursor), index, offsets));
@@ -3940,19 +3939,42 @@ inexact:
return(n_rows);
}
-/*******************************************************************//**
-Estimates the number of rows in a given index range.
-@return estimated number of rows */
-UNIV_INTERN
-ib_int64_t
-btr_estimate_n_rows_in_range(
-/*=========================*/
- dict_index_t* index, /*!< in: index */
- const dtuple_t* tuple1, /*!< in: range start, may also be empty tuple */
- ulint mode1, /*!< in: search mode for range start */
- const dtuple_t* tuple2, /*!< in: range end, may also be empty tuple */
- ulint mode2, /*!< in: search mode for range end */
- trx_t* trx) /*!< in: trx */
+/** If the tree gets changed too much between the two dives for the left
+and right boundary then btr_estimate_n_rows_in_range_low() will retry
+that many times before giving up and returning the value stored in
+rows_in_range_arbitrary_ret_val. */
+static const unsigned rows_in_range_max_retries = 4;
+
+/** We pretend that a range has that many records if the tree keeps changing
+for rows_in_range_max_retries retries while we try to estimate the records
+in a given range. */
+static const int64_t rows_in_range_arbitrary_ret_val = 10;
+
+/** Estimates the number of rows in a given index range.
+@param[in] index index
+@param[in] tuple1 range start, may also be empty tuple
+@param[in] mode1 search mode for range start
+@param[in] tuple2 range end, may also be empty tuple
+@param[in] mode2 search mode for range end
+@param[in] trx trx
+@param[in] nth_attempt if the tree gets modified too much while
+we are trying to analyze it, then we will retry (this function will call
+itself, incrementing this parameter)
+@return estimated number of rows; if after rows_in_range_max_retries
+retries the tree keeps changing, then we will just return
+rows_in_range_arbitrary_ret_val as a result (if
+nth_attempt >= rows_in_range_max_retries and the tree is modified between
+the two dives). */
+static
+int64_t
+btr_estimate_n_rows_in_range_low(
+ dict_index_t* index,
+ const dtuple_t* tuple1,
+ ulint mode1,
+ const dtuple_t* tuple2,
+ ulint mode2,
+ trx_t* trx,
+ unsigned nth_attempt)
{
btr_path_t path1[BTR_PATH_ARRAY_N_SLOTS];
btr_path_t path2[BTR_PATH_ARRAY_N_SLOTS];
@@ -3990,6 +4012,12 @@ btr_estimate_n_rows_in_range(
mtr_start_trx(&mtr, trx);
+#ifdef UNIV_DEBUG
+ if (!strcmp(index->name, "iC")) {
+ DEBUG_SYNC_C("btr_estimate_n_rows_in_range_between_dives");
+ }
+#endif
+
cursor.path_arr = path2;
if (dtuple_get_n_fields(tuple2) > 0) {
@@ -4056,6 +4084,33 @@ btr_estimate_n_rows_in_range(
if (!diverged && slot1->nth_rec != slot2->nth_rec) {
+ /* If both slots do not point to the same page or if
+ the paths have crossed and the same page on both
+ apparently contains a different number of records,
+ this means that the tree must have changed between
+ the dive for slot1 and the dive for slot2 at the
+ beginning of this function. */
+ if (slot1->page_no != slot2->page_no
+ || slot1->page_level != slot2->page_level
+ || (slot1->nth_rec >= slot2->nth_rec
+ && slot1->n_recs != slot2->n_recs)) {
+
+ /* If the tree keeps changing even after a
+ few attempts, then just return some arbitrary
+ number. */
+ if (nth_attempt >= rows_in_range_max_retries) {
+ return(rows_in_range_arbitrary_ret_val);
+ }
+
+ const int64_t ret =
+ btr_estimate_n_rows_in_range_low(
+ index, tuple1, mode1,
+ tuple2, mode2, trx,
+ nth_attempt + 1);
+
+ return(ret);
+ }
+
diverged = TRUE;
if (slot1->nth_rec < slot2->nth_rec) {
@@ -4074,7 +4129,7 @@ btr_estimate_n_rows_in_range(
in this case slot1->nth_rec will point
to the supr record and slot2->nth_rec
will point to 6 */
- n_rows = 0;
+ return(0);
}
} else if (diverged && !diverged_lot) {
@@ -4105,6 +4160,30 @@ btr_estimate_n_rows_in_range(
}
}
+/** Estimates the number of rows in a given index range.
+@param[in] index index
+@param[in] tuple1 range start, may also be empty tuple
+@param[in] mode1 search mode for range start
+@param[in] tuple2 range end, may also be empty tuple
+@param[in] mode2 search mode for range end
+@param[in] trx trx
+@return estimated number of rows */
+int64_t
+btr_estimate_n_rows_in_range(
+ dict_index_t* index,
+ const dtuple_t* tuple1,
+ ulint mode1,
+ const dtuple_t* tuple2,
+ ulint mode2,
+ trx_t* trx)
+{
+ const int64_t ret = btr_estimate_n_rows_in_range_low(
+ index, tuple1, mode1, tuple2, mode2, trx,
+ 1 /* first attempt */);
+
+ return(ret);
+}
+
/*******************************************************************//**
Record the number of non_null key values in a given index for
each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index).
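
Note: the hunks above restructure the range estimate as a bounded retry. btr_estimate_n_rows_in_range_low() redoes both index dives whenever the two dives appear to have seen different trees, gives up after rows_in_range_max_retries attempts and then returns the fixed rows_in_range_arbitrary_ret_val, while the public btr_estimate_n_rows_in_range() merely starts attempt 1. A minimal standalone C++ sketch of that shape follows; estimate_low(), estimate() and tree_changed_between_dives() are illustrative stand-ins, not InnoDB functions.

// Standalone sketch of the bounded-retry estimation pattern; names are
// illustrative only.
#include <cstdint>
#include <cstdio>

static const unsigned max_retries    = 4;  // like rows_in_range_max_retries
static const int64_t  arbitrary_rows = 10; // like rows_in_range_arbitrary_ret_val

// Hypothetical stand-in: true when the two index dives saw an inconsistent
// tree (different page numbers, levels or record counts).
static bool tree_changed_between_dives(unsigned attempt)
{
	return attempt < 3;     // pretend the tree settles on the 3rd attempt
}

static int64_t estimate_low(unsigned nth_attempt)
{
	if (tree_changed_between_dives(nth_attempt)) {
		if (nth_attempt >= max_retries) {
			return arbitrary_rows;          // give up: fixed guess
		}
		return estimate_low(nth_attempt + 1);   // redo both dives
	}
	return 42;      // pretend this is the computed estimate
}

static int64_t estimate()
{
	return estimate_low(1 /* first attempt */);
}

int main()
{
	std::printf("estimated rows: %lld\n", (long long) estimate());
	return 0;
}

Capping the recursion keeps the estimate from looping forever on a busy index while still preferring a real measurement whenever the tree settles.
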
@@ -4567,7 +4646,6 @@ btr_cur_disown_inherited_fields(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!rec_offs_comp(offsets) || !rec_get_node_ptr_flag(rec));
ut_ad(rec_offs_any_extern(offsets));
- ut_ad(mtr);
for (i = 0; i < rec_offs_n_fields(offsets); i++) {
if (rec_offs_nth_extern(offsets, i)
@@ -4630,9 +4708,6 @@ btr_push_update_extern_fields(
ulint n;
const upd_field_t* uf;
- ut_ad(tuple);
- ut_ad(update);
-
uf = update->fields;
n = upd_get_n_fields(update);
@@ -4816,7 +4891,6 @@ btr_store_big_rec_extern_fields(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_any_extern(offsets));
- ut_ad(btr_mtr);
ut_ad(mtr_memo_contains(btr_mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(btr_mtr, rec_block, MTR_MEMO_PAGE_X_FIX));
diff --git a/storage/xtradb/buf/buf0buddy.cc b/storage/xtradb/buf/buf0buddy.cc
index 8cb880c1169..2ee39c6c992 100644
--- a/storage/xtradb/buf/buf0buddy.cc
+++ b/storage/xtradb/buf/buf0buddy.cc
@@ -485,7 +485,6 @@ buf_buddy_alloc_low(
{
buf_block_t* block;
- ut_ad(lru);
ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
ut_ad(!mutex_own(&buf_pool->zip_mutex));
ut_ad(i >= buf_buddy_get_slot(UNIV_ZIP_SIZE_MIN));
diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc
index 496cf63d939..b1156b4c646 100644
--- a/storage/xtradb/buf/buf0buf.cc
+++ b/storage/xtradb/buf/buf0buf.cc
@@ -3895,15 +3895,6 @@ buf_page_init(
/* Set the state of the block */
buf_block_set_file_page(block, space, offset);
-#ifdef UNIV_DEBUG_VALGRIND
- if (!space) {
- /* Silence valid Valgrind warnings about uninitialized
- data being written to data files. There are some unused
- bytes on some pages that InnoDB does not initialize. */
- UNIV_MEM_VALID(block->frame, UNIV_PAGE_SIZE);
- }
-#endif /* UNIV_DEBUG_VALGRIND */
-
buf_block_init_low(block);
block->lock_hash_val = lock_rec_hash(space, offset);
diff --git a/storage/xtradb/buf/buf0lru.cc b/storage/xtradb/buf/buf0lru.cc
index 579166753c4..dff67c0fad6 100644
--- a/storage/xtradb/buf/buf0lru.cc
+++ b/storage/xtradb/buf/buf0lru.cc
@@ -1301,6 +1301,71 @@ buf_LRU_check_size_of_non_data_objects(
}
}
+/** Diagnose failure to get a free page and request InnoDB monitor output in
+the error log if more than two seconds have been spent already.
+@param[in] n_iterations how many buf_LRU_get_free_page iterations
+ already completed
+@param[in] started_ms timestamp in ms of when the attempt to get the
+ free page started
+@param[in] flush_failures how many times single-page flush, if allowed,
+ has failed
+@param[out] mon_value_was previous srv_print_innodb_monitor value
+@param[out] started_monitor whether InnoDB monitor print has been requested
+*/
+static
+void
+buf_LRU_handle_lack_of_free_blocks(ulint n_iterations, ulint started_ms,
+ ulint flush_failures,
+ ibool *mon_value_was,
+ ibool *started_monitor)
+{
+ static ulint last_printout_ms = 0;
+
+	/* Legacy algorithm started warning after at least 2 seconds; we
+	emulate this. */
+ const ulint current_ms = ut_time_ms();
+
+ if ((current_ms > started_ms + 2000)
+ && (current_ms > last_printout_ms + 2000)) {
+
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Warning: difficult to find free blocks in\n"
+ "InnoDB: the buffer pool (%lu search iterations)!\n"
+ "InnoDB: %lu failed attempts to flush a page!"
+ " Consider\n"
+ "InnoDB: increasing the buffer pool size.\n"
+ "InnoDB: It is also possible that"
+ " in your Unix version\n"
+ "InnoDB: fsync is very slow, or"
+ " completely frozen inside\n"
+ "InnoDB: the OS kernel. Then upgrading to"
+ " a newer version\n"
+ "InnoDB: of your operating system may help."
+ " Look at the\n"
+ "InnoDB: number of fsyncs in diagnostic info below.\n"
+ "InnoDB: Pending flushes (fsync) log: %lu;"
+ " buffer pool: %lu\n"
+ "InnoDB: %lu OS file reads, %lu OS file writes,"
+ " %lu OS fsyncs\n"
+ "InnoDB: Starting InnoDB Monitor to print further\n"
+ "InnoDB: diagnostics to the standard output.\n",
+ (ulong) n_iterations,
+ (ulong) flush_failures,
+ (ulong) fil_n_pending_log_flushes,
+ (ulong) fil_n_pending_tablespace_flushes,
+ (ulong) os_n_file_reads, (ulong) os_n_file_writes,
+ (ulong) os_n_fsyncs);
+
+ last_printout_ms = current_ms;
+ *mon_value_was = srv_print_innodb_monitor;
+ *started_monitor = TRUE;
+ srv_print_innodb_monitor = TRUE;
+ os_event_set(lock_sys->timeout_event);
+ }
+
+}
+
/** The maximum allowed backoff sleep time duration, microseconds */
#define MAX_FREE_LIST_BACKOFF_SLEEP 10000
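
Note: buf_LRU_handle_lack_of_free_blocks() above replaces the old n_iterations > 20 trigger with time-based throttling: the warning is printed only once the search has lasted more than two seconds, and repeated printouts are spaced at least two seconds apart through a static last_printout_ms. A self-contained sketch of that double time check, assuming only a monotonic millisecond clock (now_ms() below is a stand-in, not a server function):

// Standalone sketch of the two-second throttling used by the new helper.
#include <chrono>
#include <cstdio>

static unsigned long now_ms()
{
	using namespace std::chrono;
	return (unsigned long) duration_cast<milliseconds>(
		steady_clock::now().time_since_epoch()).count();
}

// Print a warning at most once every two seconds, and only after the
// caller has already been waiting for two seconds.
static void maybe_warn(unsigned long started_ms, unsigned long n_iterations)
{
	static unsigned long last_printout_ms = 0;
	const unsigned long current_ms = now_ms();

	if (current_ms > started_ms + 2000
	    && current_ms > last_printout_ms + 2000) {
		std::fprintf(stderr,
			     "Warning: difficult to find free blocks "
			     "(%lu search iterations)\n", n_iterations);
		last_printout_ms = current_ms;
	}
}

int main()
{
	const unsigned long started = now_ms();
	for (unsigned long i = 0; i < 5; i++) {
		maybe_warn(started, i);   // silent: less than 2s have passed
	}
	return 0;
}
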
@@ -1348,6 +1413,7 @@ buf_LRU_get_free_block(
ulint flush_failures = 0;
ibool mon_value_was = FALSE;
ibool started_monitor = FALSE;
+ ulint started_ms = 0;
ut_ad(!mutex_own(&buf_pool->LRU_list_mutex));
@@ -1356,7 +1422,24 @@ loop:
buf_LRU_check_size_of_non_data_objects(buf_pool);
/* If there is a block in the free list, take it */
- block = buf_LRU_get_free_only(buf_pool);
+ if (DBUG_EVALUATE_IF("simulate_lack_of_pages", true, false)) {
+
+ block = NULL;
+
+ if (srv_debug_monitor_printed)
+ DBUG_SET("-d,simulate_lack_of_pages");
+
+ } else if (DBUG_EVALUATE_IF("simulate_recovery_lack_of_pages",
+ recv_recovery_on, false)) {
+
+ block = NULL;
+
+ if (srv_debug_monitor_printed)
+ DBUG_SUICIDE();
+ } else {
+
+ block = buf_LRU_get_free_only(buf_pool);
+ }
if (block) {
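
Note: the branch above adds two debug-only fault-injection points: when the corresponding DBUG keyword is set, the lookup pretends the free list is empty (block = NULL) so the warning path can be exercised deterministically in tests. A rough standalone sketch of the same idea, with a plain boolean flag standing in for the server's DBUG keyword machinery:

// Standalone sketch of a debug fault-injection branch; the flag below
// replaces the real DBUG_EVALUATE_IF keyword handling.
#include <cstdio>

static bool simulate_lack_of_pages = true;   // would be a DBUG keyword

struct block_t { int id; };
static block_t the_block = { 1 };

static block_t* get_free_block()
{
	if (simulate_lack_of_pages) {
		return 0;               // pretend the free list is empty
	}
	return &the_block;              // normal path
}

int main()
{
	block_t* b = get_free_block();
	std::printf("free block: %s\n", b ? "found" : "none (injected)");
	return 0;
}
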
@@ -1371,6 +1454,9 @@ loop:
return(block);
}
+ if (!started_ms)
+ started_ms = ut_time_ms();
+
if (srv_empty_free_list_algorithm == SRV_EMPTY_FREE_LIST_BACKOFF
&& buf_lru_manager_is_active
&& (srv_shutdown_state == SRV_SHUTDOWN_NONE
@@ -1408,11 +1494,17 @@ loop:
: FREE_LIST_BACKOFF_LOW_PRIO_DIVIDER));
}
- /* In case of backoff, do not ever attempt single page flushes
- and wait for the cleaner to free some pages instead. */
+ buf_LRU_handle_lack_of_free_blocks(n_iterations, started_ms,
+ flush_failures,
+ &mon_value_was,
+ &started_monitor);
n_iterations++;
+ srv_stats.buf_pool_wait_free.add(n_iterations, 1);
+
+ /* In case of backoff, do not ever attempt single page flushes
+ and wait for the cleaner to free some pages instead. */
goto loop;
} else {
@@ -1444,6 +1536,12 @@ loop:
mutex_exit(&buf_pool->flush_state_mutex);
+ if (DBUG_EVALUATE_IF("simulate_recovery_lack_of_pages", true, false)
+ || DBUG_EVALUATE_IF("simulate_lack_of_pages", true, false)) {
+
+ buf_pool->try_LRU_scan = false;
+ }
+
freed = FALSE;
if (buf_pool->try_LRU_scan || n_iterations > 0) {
@@ -1469,41 +1567,9 @@ loop:
}
- if (n_iterations > 20) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Warning: difficult to find free blocks in\n"
- "InnoDB: the buffer pool (%lu search iterations)!\n"
- "InnoDB: %lu failed attempts to flush a page!"
- " Consider\n"
- "InnoDB: increasing the buffer pool size.\n"
- "InnoDB: It is also possible that"
- " in your Unix version\n"
- "InnoDB: fsync is very slow, or"
- " completely frozen inside\n"
- "InnoDB: the OS kernel. Then upgrading to"
- " a newer version\n"
- "InnoDB: of your operating system may help."
- " Look at the\n"
- "InnoDB: number of fsyncs in diagnostic info below.\n"
- "InnoDB: Pending flushes (fsync) log: %lu;"
- " buffer pool: %lu\n"
- "InnoDB: %lu OS file reads, %lu OS file writes,"
- " %lu OS fsyncs\n"
- "InnoDB: Starting InnoDB Monitor to print further\n"
- "InnoDB: diagnostics to the standard output.\n",
- (ulong) n_iterations,
- (ulong) flush_failures,
- (ulong) fil_n_pending_log_flushes,
- (ulong) fil_n_pending_tablespace_flushes,
- (ulong) os_n_file_reads, (ulong) os_n_file_writes,
- (ulong) os_n_fsyncs);
-
- mon_value_was = srv_print_innodb_monitor;
- started_monitor = TRUE;
- srv_print_innodb_monitor = TRUE;
- os_event_set(srv_monitor_event);
- }
+ buf_LRU_handle_lack_of_free_blocks(n_iterations, started_ms,
+ flush_failures, &mon_value_was,
+ &started_monitor);
/* If we have scanned the whole LRU and still are unable to
find a free block then we should sleep here to let the
diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc
index 90936f6667b..49de1cf7ef8 100644
--- a/storage/xtradb/dict/dict0dict.cc
+++ b/storage/xtradb/dict/dict0dict.cc
@@ -6214,7 +6214,6 @@ dict_set_corrupted(
row_mysql_lock_data_dictionary(trx);
}
- ut_ad(index);
ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(!dict_table_is_comp(dict_sys->sys_tables));
ut_ad(!dict_table_is_comp(dict_sys->sys_indexes));
diff --git a/storage/xtradb/dict/dict0stats.cc b/storage/xtradb/dict/dict0stats.cc
index c13d4583fef..6a28f3cdf8f 100644
--- a/storage/xtradb/dict/dict0stats.cc
+++ b/storage/xtradb/dict/dict0stats.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2009, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2009, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1161,7 +1161,8 @@ dict_stats_analyze_index_level(
them away) which brings non-determinism. We skip only
leaf-level delete marks because delete marks on
non-leaf level do not make sense. */
- if (level == 0 &&
+
+ if (level == 0 && srv_stats_include_delete_marked? 0:
rec_get_deleted_flag(
rec,
page_is_comp(btr_pcur_get_page(&pcur)))) {
@@ -1347,8 +1348,12 @@ enum page_scan_method_t {
the given page and count the number of
distinct ones, also ignore delete marked
records */
- QUIT_ON_FIRST_NON_BORING/* quit when the first record that differs
+ QUIT_ON_FIRST_NON_BORING,/* quit when the first record that differs
from its right neighbor is found */
+ COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED/* scan all records on
+ the given page and count the number of
+ distinct ones, include delete marked
+ records */
};
/* @} */
@@ -1624,6 +1629,8 @@ dict_stats_analyze_index_below_cur(
offsets_rec = dict_stats_scan_page(
&rec, offsets1, offsets2, index, page, n_prefix,
+ srv_stats_include_delete_marked ?
+ COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED:
COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff,
n_external_pages);
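
Note: taken together, the dict0stats.cc hunks make the persistent-statistics sampler honour srv_stats_include_delete_marked: a new scan mode counts delete-marked records instead of skipping them, and the flag selects between the two modes at the dict_stats_scan_page() call site. A compact standalone sketch of that selection (names are illustrative only):

// Standalone sketch of selecting a page scan mode from a boolean knob.
#include <cstdio>

enum page_scan_method_t {
	COUNT_ALL_SKIP_DEL_MARKED,      // ignore delete-marked records
	COUNT_ALL_INCLUDE_DEL_MARKED    // count delete-marked records too
};

static bool stats_include_delete_marked = false;   // server knob stand-in

static page_scan_method_t pick_scan_method()
{
	return stats_include_delete_marked
		? COUNT_ALL_INCLUDE_DEL_MARKED
		: COUNT_ALL_SKIP_DEL_MARKED;
}

int main()
{
	std::printf("scan method: %d\n", (int) pick_scan_method());
	return 0;
}
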
diff --git a/storage/xtradb/dyn/dyn0dyn.cc b/storage/xtradb/dyn/dyn0dyn.cc
index 3ef5297a7c9..dd1f6863c14 100644
--- a/storage/xtradb/dyn/dyn0dyn.cc
+++ b/storage/xtradb/dyn/dyn0dyn.cc
@@ -40,7 +40,6 @@ dyn_array_add_block(
mem_heap_t* heap;
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
if (arr->heap == NULL) {
diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc
index dfaef5fdd5f..3043e780268 100644
--- a/storage/xtradb/fsp/fsp0fsp.cc
+++ b/storage/xtradb/fsp/fsp0fsp.cc
@@ -133,7 +133,7 @@ fsp_fill_free_list(
ulint space, /*!< in: space */
fsp_header_t* header, /*!< in/out: space header */
mtr_t* mtr) /*!< in/out: mini-transaction */
- UNIV_COLD MY_ATTRIBUTE((nonnull));
+ UNIV_COLD;
/**********************************************************************//**
Allocates a single free page from a segment. This function implements
the intelligent allocation strategy which tries to minimize file space
@@ -162,7 +162,7 @@ fseg_alloc_free_page_low(
in which the page should be initialized.
If init_mtr!=mtr, but the page is already
latched in mtr, do not initialize the page. */
- MY_ATTRIBUTE((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -1074,8 +1074,6 @@ fsp_fill_free_list(
ulint i;
mtr_t ibuf_mtr;
- ut_ad(header != NULL);
- ut_ad(mtr != NULL);
ut_ad(page_offset(header) == FSP_HEADER_OFFSET);
/* Check if we can fill free list from above the free list limit */
@@ -1338,7 +1336,7 @@ Allocates a single free page from a space. The page is marked as used.
@retval block, rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
(init_mtr == mtr, or the page was not previously freed in mtr)
@retval block (not allocated or initialized) otherwise */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
+static MY_ATTRIBUTE((warn_unused_result))
buf_block_t*
fsp_alloc_free_page(
/*================*/
@@ -1358,9 +1356,6 @@ fsp_alloc_free_page(
ulint page_no;
ulint space_size;
- ut_ad(mtr);
- ut_ad(init_mtr);
-
header = fsp_get_space_header(space, zip_size, mtr);
/* Get the hinted descriptor */
@@ -2379,7 +2374,6 @@ fseg_alloc_free_page_low(
ibool success;
ulint n;
- ut_ad(mtr);
ut_ad((direction >= FSP_UP) && (direction <= FSP_NO_DIR));
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
diff --git a/storage/xtradb/fts/fts0opt.cc b/storage/xtradb/fts/fts0opt.cc
index ed882d33548..cb30122adcb 100644
--- a/storage/xtradb/fts/fts0opt.cc
+++ b/storage/xtradb/fts/fts0opt.cc
@@ -579,9 +579,6 @@ fts_zip_read_word(
fts_zip_t* zip, /*!< in: Zip state + data */
fts_string_t* word) /*!< out: uncompressed word */
{
-#ifdef UNIV_DEBUG
- ulint i;
-#endif
short len = 0;
void* null = NULL;
byte* ptr = word->f_str;
@@ -656,10 +653,9 @@ fts_zip_read_word(
}
}
-#ifdef UNIV_DEBUG
/* All blocks must be freed at end of inflate. */
if (zip->status != Z_OK) {
- for (i = 0; i < ib_vector_size(zip->blocks); ++i) {
+ for (ulint i = 0; i < ib_vector_size(zip->blocks); ++i) {
if (ib_vector_getp(zip->blocks, i)) {
ut_free(ib_vector_getp(zip->blocks, i));
ib_vector_set(zip->blocks, i, &null);
@@ -670,7 +666,6 @@ fts_zip_read_word(
if (ptr != NULL) {
ut_ad(word->f_len == strlen((char*) ptr));
}
-#endif /* UNIV_DEBUG */
return(zip->status == Z_OK || zip->status == Z_STREAM_END ? ptr : NULL);
}
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 4e5f223bc1e..5a10492fa62 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -1743,6 +1743,7 @@ static bool innobase_purge_archive_logs(
}
#endif
+
/*************************************************************//**
Check for a valid value of innobase_commit_concurrency.
@return 0 for valid innodb_commit_concurrency */
@@ -4343,14 +4344,9 @@ innobase_change_buffering_inited_ok:
innobase_commit_concurrency_init_default();
-#ifndef EXTENDED_FOR_KILLIDLE
- srv_kill_idle_transaction = 0;
-#endif
-
#ifdef HAVE_POSIX_FALLOCATE
srv_use_posix_fallocate = (ibool) innobase_use_fallocate;
#endif
-
/* Do not enable backoff algorithm for small buffer pool. */
if (!innodb_empty_free_list_algorithm_backoff_allowed(
static_cast<srv_empty_free_list_t>(
@@ -14096,9 +14092,13 @@ ha_innobase::info_low(
/* If this table is already queued for
background analyze, remove it from the
queue as we are about to do the same */
- dict_mutex_enter_for_mysql();
- dict_stats_recalc_pool_del(ib_table);
- dict_mutex_exit_for_mysql();
+ if (!srv_read_only_mode) {
+
+ dict_mutex_enter_for_mysql();
+ dict_stats_recalc_pool_del(
+ ib_table);
+ dict_mutex_exit_for_mysql();
+ }
opt = DICT_STATS_RECALC_PERSISTENT;
} else {
@@ -16637,6 +16637,37 @@ ha_innobase::get_auto_increment(
ulonglong col_max_value = innobase_get_int_col_max_value(
table->next_number_field);
+ /** The following logic is needed to avoid duplicate key error
+ for autoincrement column.
+
+ (1) InnoDB gives the current autoincrement value with respect
+ to increment and offset value.
+
+	(2) Basically it does the compute_next_insert_id() logic inside InnoDB
+	to avoid the current auto increment value being changed by the handler layer.
+
+ (3) It is restricted only for insert operations. */
+
+ if (increment > 1 && thd_sql_command(user_thd) != SQLCOM_ALTER_TABLE
+ && autoinc < col_max_value) {
+
+ ulonglong prev_auto_inc = autoinc;
+
+ autoinc = ((autoinc - 1) + increment - offset)/ increment;
+
+ autoinc = autoinc * increment + offset;
+
+	/* If autoinc exceeds col_max_value then reset it to the
+	old autoinc value, because in non-strict SQL mode a
+	boundary value is not treated as an error. */
+
+ if (autoinc >= col_max_value) {
+ autoinc = prev_auto_inc;
+ }
+
+ ut_ad(autoinc > 0);
+ }
+
/* Called for the first time ? */
if (trx->n_autoinc_rows == 0) {
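
Note: the added block rounds InnoDB's current autoincrement value up to the next value on the increment/offset grid and falls back to the previous value if the rounded result would cross the column maximum. The arithmetic can be checked with a small standalone program; the sample values below are made up:

// Standalone check of the autoincrement rounding shown above.
#include <cstdio>
typedef unsigned long long ulonglong;

static ulonglong round_to_grid(ulonglong autoinc, ulonglong increment,
			       ulonglong offset, ulonglong col_max_value)
{
	const ulonglong prev_auto_inc = autoinc;

	autoinc = ((autoinc - 1) + increment - offset) / increment;
	autoinc = autoinc * increment + offset;

	if (autoinc >= col_max_value) {
		autoinc = prev_auto_inc;   // keep the old value near the limit
	}
	return autoinc;
}

int main()
{
	// e.g. current value 7, auto_increment_increment=5, offset=3:
	// ((7-1)+5-3)/5 = 1, 1*5+3 = 8 -> next value on the 3,8,13,... grid
	std::printf("%llu\n", round_to_grid(7, 5, 3, 0xFFFFFFFFULL));
	return 0;
}
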
@@ -19149,32 +19180,6 @@ innobase_fts_retrieve_ranking(
}
/***********************************************************************
-functions for kill session of idle transaction */
-ibool
-innobase_thd_is_idle(
-/*=================*/
- const void* thd) /*!< in: thread handle (THD*) */
-{
-#ifdef EXTENDED_FOR_KILLIDLE
- return(thd_command((const THD*) thd) == COM_SLEEP);
-#else
- return(FALSE);
-#endif
-}
-
-ib_int64_t
-innobase_thd_get_start_time(
-/*========================*/
- const void* thd) /*!< in: thread handle (THD*) */
-{
-#ifdef EXTENDED_FOR_KILLIDLE
- return((ib_int64_t)thd_start_time((const THD*) thd));
-#else
- return(0); /*dummy value*/
-#endif
-}
-
-/***********************************************************************
Free the memory for the FTS handler */
UNIV_INTERN
void
@@ -19193,19 +19198,6 @@ innobase_fts_close_ranking(
return;
}
-UNIV_INTERN
-void
-innobase_thd_kill(
-/*==============*/
- ulong thd_id)
-{
-#ifdef EXTENDED_FOR_KILLIDLE
- thd_kill(thd_id);
-#else
- return;
-#endif
-}
-
/***********************************************************************
Find and Retrieve the FTS Relevance Ranking result for doc with doc_id
of prebuilt->fts_doc_id
@@ -19403,16 +19395,6 @@ innobase_fts_retrieve_docid(
}
-ulong
-innobase_thd_get_thread_id(
-/*=======================*/
- const void* thd)
-{
- return(thd_get_thread_id((const THD*) thd));
-}
-
-
-
/***********************************************************************
Find and retrieve the size of the current result
@return number of matching rows */
@@ -20067,6 +20049,12 @@ static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite,
"Disable with --skip-innodb-doublewrite.",
NULL, NULL, TRUE);
+static MYSQL_SYSVAR_BOOL(stats_include_delete_marked,
+ srv_stats_include_delete_marked,
+ PLUGIN_VAR_OPCMDARG,
+ "Scan delete marked records for persistent stat",
+ NULL, NULL, FALSE);
+
static MYSQL_SYSVAR_BOOL(use_atomic_writes, innobase_use_atomic_writes,
PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
"Prevent partial page writes, via atomic writes (beta). "
@@ -21418,6 +21406,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(data_file_path),
MYSQL_SYSVAR(data_home_dir),
MYSQL_SYSVAR(doublewrite),
+ MYSQL_SYSVAR(stats_include_delete_marked),
MYSQL_SYSVAR(api_enable_binlog),
MYSQL_SYSVAR(api_enable_mdl),
MYSQL_SYSVAR(api_disable_rowlock),
diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h
index 783077ceaf1..f91c0fb4703 100644
--- a/storage/xtradb/handler/ha_innodb.h
+++ b/storage/xtradb/handler/ha_innodb.h
@@ -167,6 +167,8 @@ class ha_innobase: public handler
int index_first(uchar * buf);
int index_last(uchar * buf);
+ bool has_gap_locks() const { return true; }
+
int rnd_init(bool scan);
int rnd_end();
int rnd_next(uchar *buf);
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc
index 740f15c2bd5..aede923d22f 100644
--- a/storage/xtradb/handler/handler0alter.cc
+++ b/storage/xtradb/handler/handler0alter.cc
@@ -1868,6 +1868,7 @@ innobase_fts_check_doc_id_index_in_def(
return(FTS_NOT_EXIST_DOC_ID_INDEX);
}
+
/*******************************************************************//**
Create an index table where indexes are ordered as follows:
@@ -1936,26 +1937,11 @@ innobase_create_key_defs(
(only prefix/part of the column is indexed), MySQL will treat the
index as a PRIMARY KEY unless the table already has one. */
- if (n_add > 0 && !new_primary && got_default_clust
- && (key_info[*add].flags & HA_NOSAME)
- && !(key_info[*add].flags & HA_KEY_HAS_PART_KEY_SEG)) {
- uint key_part = key_info[*add].user_defined_key_parts;
-
- new_primary = true;
+ ut_ad(altered_table->s->primary_key == 0
+ || altered_table->s->primary_key == MAX_KEY);
- while (key_part--) {
- const uint maybe_null
- = key_info[*add].key_part[key_part].key_type
- & FIELDFLAG_MAYBE_NULL;
- DBUG_ASSERT(!maybe_null
- == !key_info[*add].key_part[key_part].
- field->real_maybe_null());
-
- if (maybe_null) {
- new_primary = false;
- break;
- }
- }
+ if (got_default_clust && !new_primary) {
+ new_primary = (altered_table->s->primary_key != MAX_KEY);
}
const bool rebuild = new_primary || add_fts_doc_id
@@ -1974,8 +1960,14 @@ innobase_create_key_defs(
ulint primary_key_number;
if (new_primary) {
- DBUG_ASSERT(n_add > 0);
- primary_key_number = *add;
+ if (n_add == 0) {
+ DBUG_ASSERT(got_default_clust);
+ DBUG_ASSERT(altered_table->s->primary_key
+ == 0);
+ primary_key_number = 0;
+ } else {
+ primary_key_number = *add;
+ }
} else if (got_default_clust) {
/* Create the GEN_CLUST_INDEX */
index_def_t* index = indexdef++;
@@ -3097,6 +3089,8 @@ prepare_inplace_alter_table_dict(
ctx->add_cols = add_cols;
} else {
DBUG_ASSERT(!innobase_need_rebuild(ha_alter_info, old_table));
+ DBUG_ASSERT(old_table->s->primary_key
+ == altered_table->s->primary_key);
if (!ctx->new_table->fts
&& innobase_fulltext_exist(altered_table)) {
@@ -4142,6 +4136,27 @@ found_col:
add_fts_doc_id_idx, prebuilt));
}
+/** Get the name of an erroneous key.
+@param[in]	error_key_num	InnoDB number of the erroneous key
+@param[in] ha_alter_info changes that were being performed
+@param[in] table InnoDB table
+@return the name of the erroneous key */
+static
+const char*
+get_error_key_name(
+ ulint error_key_num,
+ const Alter_inplace_info* ha_alter_info,
+ const dict_table_t* table)
+{
+ if (error_key_num == ULINT_UNDEFINED) {
+ return(FTS_DOC_ID_INDEX_NAME);
+ } else if (ha_alter_info->key_count == 0) {
+ return(dict_table_get_first_index(table)->name);
+ } else {
+ return(ha_alter_info->key_info_buffer[error_key_num].name);
+ }
+}
+
/** Alter the table structure in-place with operations
specified using Alter_inplace_info.
The level of concurrency allowed during this operation depends
@@ -4264,17 +4279,13 @@ oom:
case DB_ONLINE_LOG_TOO_BIG:
DBUG_ASSERT(ctx->online);
my_error(ER_INNODB_ONLINE_LOG_TOO_BIG, MYF(0),
- (prebuilt->trx->error_key_num == ULINT_UNDEFINED)
- ? FTS_DOC_ID_INDEX_NAME
- : ha_alter_info->key_info_buffer[
- prebuilt->trx->error_key_num].name);
+ get_error_key_name(prebuilt->trx->error_key_num,
+ ha_alter_info, prebuilt->table));
break;
case DB_INDEX_CORRUPT:
my_error(ER_INDEX_CORRUPT, MYF(0),
- (prebuilt->trx->error_key_num == ULINT_UNDEFINED)
- ? FTS_DOC_ID_INDEX_NAME
- : ha_alter_info->key_info_buffer[
- prebuilt->trx->error_key_num].name);
+ get_error_key_name(prebuilt->trx->error_key_num,
+ ha_alter_info, prebuilt->table));
break;
case DB_DECRYPTION_FAILED: {
String str;
@@ -5094,7 +5105,6 @@ innobase_update_foreign_cache(
"Foreign key constraints for table '%s'"
" are loaded with charset check off",
user_table->name);
-
}
}
@@ -5194,14 +5204,13 @@ commit_try_rebuild(
DBUG_RETURN(true);
case DB_ONLINE_LOG_TOO_BIG:
my_error(ER_INNODB_ONLINE_LOG_TOO_BIG, MYF(0),
- ha_alter_info->key_info_buffer[0].name);
+ get_error_key_name(err_key, ha_alter_info,
+ rebuilt_table));
DBUG_RETURN(true);
case DB_INDEX_CORRUPT:
my_error(ER_INDEX_CORRUPT, MYF(0),
- (err_key == ULINT_UNDEFINED)
- ? FTS_DOC_ID_INDEX_NAME
- : ha_alter_info->key_info_buffer[err_key]
- .name);
+ get_error_key_name(err_key, ha_alter_info,
+ rebuilt_table));
DBUG_RETURN(true);
default:
my_error_innodb(error, table_name, user_table->flags);
diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc
index 420dff83a40..97d2bd36912 100644
--- a/storage/xtradb/handler/i_s.cc
+++ b/storage/xtradb/handler/i_s.cc
@@ -8324,29 +8324,7 @@ i_s_innodb_changed_pages_fill(
while(log_online_bitmap_iterator_next(&i) &&
(!srv_max_changed_pages ||
- output_rows_num < srv_max_changed_pages) &&
- /*
- There is no need to compare both start LSN and end LSN fields
- with maximum value. It's enough to compare only start LSN.
- Example:
-
- max_lsn = 100
- \\\\\\\\\\\\\\\\\\\\\\\\\|\\\\\\\\ - Query 1
- I------I I-------I I-------------I I----I
- ////////////////// | - Query 2
- 1 2 3 4
-
- Query 1:
- SELECT * FROM INNODB_CHANGED_PAGES WHERE start_lsn < 100
- will select 1,2,3 bitmaps
- Query 2:
- SELECT * FROM INNODB_CHANGED_PAGES WHERE end_lsn < 100
- will select 1,2 bitmaps
-
- The condition start_lsn <= 100 will be false after reading
- 1,2,3 bitmaps which suits for both cases.
- */
- LOG_BITMAP_ITERATOR_START_LSN(i) <= max_lsn)
+ output_rows_num < srv_max_changed_pages))
{
if (!LOG_BITMAP_ITERATOR_PAGE_CHANGED(i))
continue;
diff --git a/storage/xtradb/include/btr0cur.h b/storage/xtradb/include/btr0cur.h
index f485d072c4c..960bd55d3d9 100644
--- a/storage/xtradb/include/btr0cur.h
+++ b/storage/xtradb/include/btr0cur.h
@@ -294,11 +294,7 @@ btr_cur_update_alloc_zip_func(
false=update-in-place */
mtr_t* mtr, /*!< in/out: mini-transaction */
trx_t* trx) /*!< in: NULL or transaction */
-#ifdef UNIV_DEBUG
- MY_ATTRIBUTE((nonnull (1, 2, 3, 4, 7), warn_unused_result));
-#else
- MY_ATTRIBUTE((nonnull (1, 2, 3, 6), warn_unused_result));
-#endif
+ MY_ATTRIBUTE((warn_unused_result));
#ifdef UNIV_DEBUG
# define btr_cur_update_alloc_zip(page_zip,cursor,index,offsets,len,cr,mtr,trx) \
@@ -428,7 +424,7 @@ btr_cur_del_mark_set_clust_rec(
const ulint* offsets,/*!< in: rec_get_offsets(rec) */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in/out: mini-transaction */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/***********************************************************//**
Sets a secondary index record delete mark to TRUE or FALSE.
@return DB_SUCCESS, DB_LOCK_WAIT, or error number */
@@ -441,7 +437,7 @@ btr_cur_del_mark_set_sec_rec(
ibool val, /*!< in: value to set */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in/out: mini-transaction */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*************************************************************//**
Tries to compress a page of the tree if it seems useful. It is assumed
that mtr holds an x-latch on the tree and on the cursor page. To avoid
@@ -609,8 +605,7 @@ btr_cur_disown_inherited_fields(
dict_index_t* index, /*!< in: index of the page */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
const upd_t* update, /*!< in: update vector */
- mtr_t* mtr) /*!< in/out: mini-transaction */
- MY_ATTRIBUTE((nonnull(2,3,4,5,6)));
+ mtr_t* mtr); /*!< in/out: mini-transaction */
/** Operation code for btr_store_big_rec_extern_fields(). */
enum blob_op {
@@ -655,7 +650,7 @@ btr_store_big_rec_extern_fields(
mtr_t* btr_mtr, /*!< in: mtr containing the
latches to the clustered index */
enum blob_op op) /*! in: operation code */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************************//**
Frees the space in an externally stored field to the file space
@@ -751,8 +746,7 @@ btr_push_update_extern_fields(
/*==========================*/
dtuple_t* tuple, /*!< in/out: data tuple */
const upd_t* update, /*!< in: update vector */
- mem_heap_t* heap) /*!< in: memory heap */
- MY_ATTRIBUTE((nonnull));
+ mem_heap_t* heap); /*!< in: memory heap */
/***********************************************************//**
Sets a secondary index record's delete mark to the given value. This
function is only used by the insert buffer merge mechanism. */
diff --git a/storage/xtradb/include/btr0sea.h b/storage/xtradb/include/btr0sea.h
index 8f438bf640e..bfe2c43defb 100644
--- a/storage/xtradb/include/btr0sea.h
+++ b/storage/xtradb/include/btr0sea.h
@@ -200,7 +200,7 @@ hash_table_t*
btr_search_get_hash_table(
/*======================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((pure,warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Returns the adaptive hash index latch for a given index key.
@@ -210,7 +210,7 @@ prio_rw_lock_t*
btr_search_get_latch(
/*=================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((pure,warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
Returns the AHI partition number corresponding to a given index ID. */
@@ -227,8 +227,7 @@ UNIV_INLINE
void
btr_search_index_init(
/*===============*/
- dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull));
+ dict_index_t* index); /*!< in: index */
/********************************************************************//**
Latches all adaptive hash index latches in exclusive mode. */
diff --git a/storage/xtradb/include/btr0sea.ic b/storage/xtradb/include/btr0sea.ic
index 3cbcff75f31..e963d8a8449 100644
--- a/storage/xtradb/include/btr0sea.ic
+++ b/storage/xtradb/include/btr0sea.ic
@@ -90,7 +90,6 @@ btr_search_get_hash_table(
/*======================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->search_table);
return(index->search_table);
@@ -105,7 +104,6 @@ btr_search_get_latch(
/*=================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->search_latch >= btr_search_latch_arr &&
index->search_latch < btr_search_latch_arr +
btr_search_index_num);
@@ -132,8 +130,6 @@ btr_search_index_init(
/*===============*/
dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
-
index->search_latch =
&btr_search_latch_arr[btr_search_get_key(index->id)];
index->search_table =
diff --git a/storage/xtradb/include/buf0buddy.ic b/storage/xtradb/include/buf0buddy.ic
index 9bc8e9e8762..a5fb510dd19 100644
--- a/storage/xtradb/include/buf0buddy.ic
+++ b/storage/xtradb/include/buf0buddy.ic
@@ -50,7 +50,7 @@ buf_buddy_alloc_low(
allocated from the LRU list and
buf_pool->LRU_list_mutex was
temporarily released */
- MY_ATTRIBUTE((malloc, nonnull));
+ MY_ATTRIBUTE((malloc));
/**********************************************************************//**
Deallocate a block. */
diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h
index 6e705e311d5..1774d9445ff 100644
--- a/storage/xtradb/include/buf0buf.h
+++ b/storage/xtradb/include/buf0buf.h
@@ -243,8 +243,7 @@ buf_relocate(
buf_page_t* bpage, /*!< in/out: control block being relocated;
buf_page_get_state(bpage) must be
BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */
- buf_page_t* dpage) /*!< in/out: destination control block */
- MY_ATTRIBUTE((nonnull));
+ buf_page_t* dpage); /*!< in/out: destination control block */
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
@@ -791,7 +790,7 @@ buf_page_print(
ulint flags) /*!< in: 0 or
BUF_PAGE_PRINT_NO_CRASH or
BUF_PAGE_PRINT_NO_FULL */
- UNIV_COLD MY_ATTRIBUTE((nonnull));
+ UNIV_COLD;
/********************************************************************//**
Decompress a block.
@return TRUE if successful */
diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h
index cfaf3e12e82..1622b927a76 100644
--- a/storage/xtradb/include/dict0dict.h
+++ b/storage/xtradb/include/dict0dict.h
@@ -762,7 +762,7 @@ ulint
dict_index_is_clust(
/*================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Check whether the index is unique.
@return nonzero for unique index, zero for other indexes */
@@ -771,7 +771,7 @@ ulint
dict_index_is_unique(
/*=================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Check whether the index is the insert buffer tree.
@return nonzero for insert buffer, zero for other indexes */
@@ -780,7 +780,7 @@ ulint
dict_index_is_ibuf(
/*===============*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Check whether the index is a secondary index or the insert buffer tree.
@return nonzero for insert buffer, zero for other indexes */
@@ -789,7 +789,7 @@ ulint
dict_index_is_sec_or_ibuf(
/*======================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************************
Gets the all the FTS indexes for the table. NOTE: must not be called for
@@ -811,7 +811,7 @@ ulint
dict_table_get_n_user_cols(
/*=======================*/
const dict_table_t* table) /*!< in: table */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Gets the number of system columns in a table in the dictionary cache.
@return number of system (e.g., ROW_ID) columns of a table */
@@ -830,7 +830,7 @@ ulint
dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
- MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Gets the approximately estimated number of rows in the table.
@return estimated number of rows */
@@ -1784,7 +1784,7 @@ ulint
dict_index_is_corrupted(
/*====================*/
const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -1797,7 +1797,7 @@ dict_set_corrupted(
dict_index_t* index, /*!< in/out: index */
trx_t* trx, /*!< in/out: transaction */
const char* ctx) /*!< in: context */
- UNIV_COLD MY_ATTRIBUTE((nonnull));
+ UNIV_COLD;
/**********************************************************************//**
Flags an index corrupted in the data dictionary cache only. This
@@ -1808,8 +1808,7 @@ void
dict_set_corrupted_index_cache_only(
/*================================*/
dict_index_t* index, /*!< in/out: index */
- dict_table_t* table) /*!< in/out: table */
- MY_ATTRIBUTE((nonnull));
+ dict_table_t* table); /*!< in/out: table */
/**********************************************************************//**
Flags a table with specified space_id corrupted in the table dictionary
diff --git a/storage/xtradb/include/dict0dict.ic b/storage/xtradb/include/dict0dict.ic
index 2b63ddea51d..81da2fa5580 100644
--- a/storage/xtradb/include/dict0dict.ic
+++ b/storage/xtradb/include/dict0dict.ic
@@ -267,7 +267,6 @@ dict_index_is_clust(
/*================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_CLUSTERED);
@@ -281,7 +280,6 @@ dict_index_is_unique(
/*=================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_UNIQUE);
@@ -296,7 +294,6 @@ dict_index_is_ibuf(
/*===============*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return(index->type & DICT_IBUF);
@@ -328,7 +325,6 @@ dict_index_is_sec_or_ibuf(
{
ulint type;
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
type = index->type;
@@ -346,7 +342,6 @@ dict_table_get_n_user_cols(
/*=======================*/
const dict_table_t* table) /*!< in: table */
{
- ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
return(table->n_cols - DATA_N_SYS_COLS);
@@ -378,7 +373,6 @@ dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
{
- ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
return(table->n_cols);
@@ -1550,7 +1544,6 @@ dict_index_is_corrupted(
/*====================*/
const dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
return((index->type & DICT_CORRUPT)
diff --git a/storage/xtradb/include/dyn0dyn.h b/storage/xtradb/include/dyn0dyn.h
index 1bd10b6bf58..20963a1472b 100644
--- a/storage/xtradb/include/dyn0dyn.h
+++ b/storage/xtradb/include/dyn0dyn.h
@@ -46,9 +46,8 @@ UNIV_INLINE
dyn_array_t*
dyn_array_create(
/*=============*/
- dyn_array_t* arr) /*!< in/out memory buffer of
+ dyn_array_t* arr); /*!< in/out memory buffer of
size sizeof(dyn_array_t) */
- MY_ATTRIBUTE((nonnull));
/************************************************************//**
Frees a dynamic array. */
UNIV_INLINE
@@ -69,7 +68,7 @@ dyn_array_open(
dyn_array_t* arr, /*!< in: dynamic array */
ulint size) /*!< in: size in bytes of the buffer; MUST be
smaller than DYN_ARRAY_DATA_SIZE! */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
Closes the buffer returned by dyn_array_open. */
UNIV_INLINE
@@ -77,8 +76,7 @@ void
dyn_array_close(
/*============*/
dyn_array_t* arr, /*!< in: dynamic array */
- const byte* ptr) /*!< in: end of used space */
- MY_ATTRIBUTE((nonnull));
+ const byte* ptr); /*!< in: end of used space */
/*********************************************************************//**
Makes room on top of a dyn array and returns a pointer to
the added element. The caller must copy the element to
@@ -90,7 +88,7 @@ dyn_array_push(
/*===========*/
dyn_array_t* arr, /*!< in/out: dynamic array */
ulint size) /*!< in: size in bytes of the element */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
Returns pointer to an element in dyn array.
@return pointer to element */
@@ -101,7 +99,7 @@ dyn_array_get_element(
const dyn_array_t* arr, /*!< in: dyn array */
ulint pos) /*!< in: position of element
in bytes from array start */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
Returns the size of stored data in a dyn array.
@return data size in bytes */
@@ -110,7 +108,7 @@ ulint
dyn_array_get_data_size(
/*====================*/
const dyn_array_t* arr) /*!< in: dyn array */
- MY_ATTRIBUTE((nonnull, warn_unused_result, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
Gets the first block in a dyn array.
@param arr dyn array
@@ -144,7 +142,7 @@ ulint
dyn_block_get_used(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
- MY_ATTRIBUTE((nonnull, warn_unused_result, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Gets pointer to the start of data in a dyn array block.
@return pointer to data */
@@ -153,7 +151,7 @@ byte*
dyn_block_get_data(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
- MY_ATTRIBUTE((nonnull, warn_unused_result, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************//**
Pushes n bytes to a dyn array. */
UNIV_INLINE
diff --git a/storage/xtradb/include/dyn0dyn.ic b/storage/xtradb/include/dyn0dyn.ic
index f18f2e6dff9..6e97649245e 100644
--- a/storage/xtradb/include/dyn0dyn.ic
+++ b/storage/xtradb/include/dyn0dyn.ic
@@ -36,7 +36,7 @@ dyn_block_t*
dyn_array_add_block(
/*================*/
dyn_array_t* arr) /*!< in/out: dyn array */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Gets the number of used bytes in a dyn array block.
@@ -47,8 +47,6 @@ dyn_block_get_used(
/*===============*/
const dyn_block_t* block) /*!< in: dyn array block */
{
- ut_ad(block);
-
return((block->used) & ~DYN_BLOCK_FULL_FLAG);
}
@@ -76,7 +74,6 @@ dyn_array_create(
dyn_array_t* arr) /*!< in/out: memory buffer of
size sizeof(dyn_array_t) */
{
- ut_ad(arr);
#if DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG
# error "DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG"
#endif
@@ -119,7 +116,6 @@ dyn_array_push(
dyn_block_t* block;
ulint used;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
ut_ad(size <= DYN_ARRAY_DATA_SIZE);
ut_ad(size);
@@ -159,7 +155,6 @@ dyn_array_open(
{
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
ut_ad(size <= DYN_ARRAY_DATA_SIZE);
ut_ad(size);
@@ -195,7 +190,6 @@ dyn_array_close(
{
dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
block = dyn_array_get_last_block(arr);
@@ -222,7 +216,6 @@ dyn_array_get_element(
{
const dyn_block_t* block;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
/* Get the first array block */
@@ -260,7 +253,6 @@ dyn_array_get_data_size(
const dyn_block_t* block;
ulint sum = 0;
- ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);
if (arr->heap == NULL) {
diff --git a/storage/xtradb/include/log0online.h b/storage/xtradb/include/log0online.h
index 5706f3af4b0..722336dd6b4 100644
--- a/storage/xtradb/include/log0online.h
+++ b/storage/xtradb/include/log0online.h
@@ -38,19 +38,25 @@ log_online_bitmap_file_range_t;
/** An iterator over changed page info */
typedef struct log_bitmap_iterator_struct log_bitmap_iterator_t;
-/*********************************************************************//**
-Initializes the online log following subsytem. */
+/** Initialize the constant part of the log tracking subsystem */
+UNIV_INTERN
+void
+log_online_init(void);
+
+/** Initialize the dynamic part of the log tracking subsystem */
UNIV_INTERN
void
log_online_read_init(void);
-/*=======================*/
-/*********************************************************************//**
-Shuts down the online log following subsystem. */
+/** Shut down the dynamic part of the log tracking subsystem */
UNIV_INTERN
void
log_online_read_shutdown(void);
-/*===========================*/
+
+/** Shut down the constant part of the log tracking subsystem */
+UNIV_INTERN
+void
+log_online_shutdown(void);
/*********************************************************************//**
Reads and parses the redo log up to last checkpoint LSN to build the changed
@@ -147,6 +153,8 @@ struct log_online_bitmap_file_range_struct {
/** Struct for an iterator through all bits of changed pages bitmap blocks */
struct log_bitmap_iterator_struct
{
+ lsn_t max_lsn; /*!< End LSN of the
+ range */
ibool failed; /*!< Has the iteration
stopped prematurely */
log_online_bitmap_file_range_t in_files; /*!< The bitmap files
diff --git a/storage/xtradb/include/log0recv.h b/storage/xtradb/include/log0recv.h
index e93ec2666af..e7b6a937f01 100644
--- a/storage/xtradb/include/log0recv.h
+++ b/storage/xtradb/include/log0recv.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -300,20 +301,12 @@ void
recv_sys_var_init(void);
/*===================*/
#endif /* !UNIV_HOTBACKUP */
-/*******************************************************************//**
-Empties the hash table of stored log records, applying them to appropriate
-pages. */
+/** Apply the hash table of stored log records to persistent data pages.
+@param[in] last_batch whether the change buffer merge will be
+ performed as part of the operation */
UNIV_INTERN
-dberr_t
-recv_apply_hashed_log_recs(
-/*=======================*/
- ibool allow_ibuf); /*!< in: if TRUE, also ibuf operations are
- allowed during the application; if FALSE,
- no ibuf operations are allowed, and after
- the application all file pages are flushed to
- disk and invalidated in buffer pool: this
- alternative means that no new log records
- can be generated during the application */
+void
+recv_apply_hashed_log_recs(bool last_batch);
#ifdef UNIV_HOTBACKUP
/*******************************************************************//**
Applies log records in the hash table to a backup. */
@@ -439,6 +432,8 @@ struct recv_sys_t{
scan find a corrupt log block, or a corrupt
log record, or there is a log parsing
buffer overflow */
+ /** the time when progress was last reported */
+ ib_time_t progress_time;
#ifdef UNIV_LOG_ARCHIVE
log_group_t* archive_group;
/*!< in archive recovery: the log group whose
@@ -451,6 +446,20 @@ struct recv_sys_t{
addresses in the hash table */
recv_dblwr_t dblwr;
+
+ /** Determine whether redo log recovery progress should be reported.
+ @param[in] time the current time
+ @return whether progress should be reported
+ (the last report was at least 15 seconds ago) */
+ bool report(ib_time_t time)
+ {
+ if (time - progress_time < 15) {
+ return false;
+ }
+
+ progress_time = time;
+ return true;
+ }
};
/** The recovery system */
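
Note: recv_sys_t::report() above rate-limits recovery progress messages: it returns true, and advances progress_time, at most once every 15 seconds. A self-contained sketch of the same throttle, with time_t standing in for ib_time_t:

// Standalone sketch of the 15-second progress throttle.
#include <ctime>
#include <cstdio>

struct progress_reporter {
	time_t progress_time;

	// Return true (and remember the time) at most once per 15 seconds.
	bool report(time_t now)
	{
		if (now - progress_time < 15) {
			return false;
		}
		progress_time = now;
		return true;
	}
};

int main()
{
	progress_reporter r = { 0 };
	const time_t now = std::time(0);

	if (r.report(now)) {
		std::printf("progress: applying log records...\n");
	}
	// A second call within 15 seconds stays silent.
	if (!r.report(now + 5)) {
		std::printf("(suppressed: reported %d seconds ago)\n", 5);
	}
	return 0;
}
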
diff --git a/storage/xtradb/include/mach0data.h b/storage/xtradb/include/mach0data.h
index 9859def0adc..2e16634a6c2 100644
--- a/storage/xtradb/include/mach0data.h
+++ b/storage/xtradb/include/mach0data.h
@@ -53,7 +53,7 @@ ulint
mach_read_from_1(
/*=============*/
const byte* b) /*!< in: pointer to byte */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in two consecutive
bytes. We store the most significant byte to the lower address. */
@@ -114,7 +114,7 @@ ulint
mach_read_from_3(
/*=============*/
const byte* b) /*!< in: pointer to 3 bytes */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in four consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -133,7 +133,7 @@ ulint
mach_read_from_4(
/*=============*/
const byte* b) /*!< in: pointer to four bytes */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************//**
Writes a ulint in a compressed form (1..5 bytes).
@return stored size in bytes */
@@ -160,7 +160,7 @@ ulint
mach_read_compressed(
/*=================*/
const byte* b) /*!< in: pointer to memory from where to read */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in 6 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -179,7 +179,7 @@ ib_uint64_t
mach_read_from_6(
/*=============*/
const byte* b) /*!< in: pointer to 6 bytes */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in 7 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -198,7 +198,7 @@ ib_uint64_t
mach_read_from_7(
/*=============*/
const byte* b) /*!< in: pointer to 7 bytes */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*******************************************************//**
The following function is used to store data in 8 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -243,7 +243,7 @@ ib_uint64_t
mach_ull_read_compressed(
/*=====================*/
const byte* b) /*!< in: pointer to memory from where to read */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************//**
Writes a 64-bit integer in a compressed form (1..11 bytes).
@return size in bytes */
@@ -270,7 +270,7 @@ ib_uint64_t
mach_ull_read_much_compressed(
/*==========================*/
const byte* b) /*!< in: pointer to memory from where to read */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/*********************************************************//**
Reads a ulint in a compressed form if the log record fully contains it.
@return pointer to end of the stored field, NULL if not complete */
diff --git a/storage/xtradb/include/mach0data.ic b/storage/xtradb/include/mach0data.ic
index bf2c735b0da..3904d96c09f 100644
--- a/storage/xtradb/include/mach0data.ic
+++ b/storage/xtradb/include/mach0data.ic
@@ -52,7 +52,6 @@ mach_read_from_1(
/*=============*/
const byte* b) /*!< in: pointer to byte */
{
- ut_ad(b);
return((ulint)(b[0]));
}
@@ -132,7 +131,6 @@ mach_read_from_3(
/*=============*/
const byte* b) /*!< in: pointer to 3 bytes */
{
- ut_ad(b);
return( ((ulint)(b[0]) << 16)
| ((ulint)(b[1]) << 8)
| (ulint)(b[2])
@@ -182,7 +180,6 @@ mach_read_from_4(
/*=============*/
const byte* b) /*!< in: pointer to four bytes */
{
- ut_ad(b);
return( ((ulint)(b[0]) << 24)
| ((ulint)(b[1]) << 16)
| ((ulint)(b[2]) << 8)
@@ -261,8 +258,6 @@ mach_read_compressed(
{
ulint flag;
- ut_ad(b);
-
flag = mach_read_from_1(b);
if (flag < 0x80UL) {
@@ -339,8 +334,6 @@ mach_read_from_7(
/*=============*/
const byte* b) /*!< in: pointer to 7 bytes */
{
- ut_ad(b);
-
return(ut_ull_create(mach_read_from_3(b), mach_read_from_4(b + 3)));
}
@@ -370,8 +363,6 @@ mach_read_from_6(
/*=============*/
const byte* b) /*!< in: pointer to 6 bytes */
{
- ut_ad(b);
-
return(ut_ull_create(mach_read_from_2(b), mach_read_from_4(b + 2)));
}
@@ -419,8 +410,6 @@ mach_ull_read_compressed(
ib_uint64_t n;
ulint size;
- ut_ad(b);
-
n = (ib_uint64_t) mach_read_compressed(b);
size = mach_get_compressed_size((ulint) n);
@@ -486,8 +475,6 @@ mach_ull_read_much_compressed(
ib_uint64_t n;
ulint size;
- ut_ad(b);
-
if (*b != (byte)0xFF) {
n = 0;
size = 0;
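
The mach0data.h hunks above swap MY_ATTRIBUTE((nonnull, pure)) for MY_ATTRIBUTE((warn_unused_result)) on the byte readers, and the mach0data.ic hunks drop the matching ut_ad(b) runtime assertions. A minimal stand-alone C++ sketch of what the new annotation does at compile time; the MY_ATTRIBUTE expansion below is an assumption for a GCC/Clang build, not code copied from the tree:

// warn_unused_sketch.cc -- what MY_ATTRIBUTE((warn_unused_result)) buys.
// Illustrative only; the macro expansion assumes GCC/Clang.
#include <cstdio>

#define MY_ATTRIBUTE(x) __attribute__(x)

// Reader shaped like mach_read_from_1(): a call whose value is discarded
// now draws a -Wunused-result warning at compile time.
static unsigned long read_from_1(const unsigned char* b)
    MY_ATTRIBUTE((warn_unused_result));

static unsigned long read_from_1(const unsigned char* b)
{
    return static_cast<unsigned long>(b[0]);
}

int main()
{
    const unsigned char buf[1] = { 42 };

    read_from_1(buf);                         // warning: result ignored
    std::printf("%lu\n", read_from_1(buf));   // fine: result is used
    return 0;
}
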
diff --git a/storage/xtradb/include/mtr0mtr.h b/storage/xtradb/include/mtr0mtr.h
index 23992598f2e..ef6cd61719d 100644
--- a/storage/xtradb/include/mtr0mtr.h
+++ b/storage/xtradb/include/mtr0mtr.h
@@ -235,8 +235,7 @@ UNIV_INTERN
void
mtr_commit(
/*=======*/
- mtr_t* mtr) /*!< in/out: mini-transaction */
- MY_ATTRIBUTE((nonnull));
+ mtr_t* mtr); /*!< in/out: mini-transaction */
/**********************************************************//**
Sets and returns a savepoint in mtr.
@return savepoint */
@@ -354,7 +353,7 @@ mtr_memo_contains(
mtr_t* mtr, /*!< in: mtr */
const void* object, /*!< in: object to search */
ulint type) /*!< in: type of object */
- MY_ATTRIBUTE((warn_unused_result, nonnull));
+ MY_ATTRIBUTE((warn_unused_result));
/**********************************************************//**
Checks if memo contains the given page.
diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h
index f590c5f16ce..d6f0ecfb69c 100644
--- a/storage/xtradb/include/os0file.h
+++ b/storage/xtradb/include/os0file.h
@@ -557,9 +557,10 @@ os_file_create_simple_no_error_handling_func(
value */
__attribute__((nonnull, warn_unused_result));
/****************************************************************//**
-Tries to disable OS caching on an opened file descriptor. */
+Tries to disable OS caching on an opened file descriptor.
+@return true if operation is success and false otherwise */
UNIV_INTERN
-void
+bool
os_file_set_nocache(
/*================*/
os_file_t fd, /*!< in: file descriptor to alter */
diff --git a/storage/xtradb/include/os0thread.h b/storage/xtradb/include/os0thread.h
index 671b9b7dc3f..7865358b0f7 100644
--- a/storage/xtradb/include/os0thread.h
+++ b/storage/xtradb/include/os0thread.h
@@ -131,11 +131,9 @@ os_thread_create_func(
os_thread_id_t* thread_id); /*!< out: id of the created
thread, or NULL */
-/**
-Waits until the specified thread completes and joins it. Its return value is
-ignored.
-
-@param thread thread to join */
+/** Waits until the specified thread completes and joins it.
+Its return value is ignored.
+@param[in,out] thread thread to join */
UNIV_INTERN
void
os_thread_join(
diff --git a/storage/xtradb/include/page0page.h b/storage/xtradb/include/page0page.h
index cb43c937757..eefa0fa4c5b 100644
--- a/storage/xtradb/include/page0page.h
+++ b/storage/xtradb/include/page0page.h
@@ -235,8 +235,7 @@ ulint
page_header_get_offs(
/*=================*/
const page_t* page, /*!< in: page */
- ulint field) /*!< in: PAGE_FREE, ... */
- MY_ATTRIBUTE((nonnull, pure));
+ ulint field); /*!< in: PAGE_FREE, ... */
/*************************************************************//**
Returns the pointer stored in the given header field, or NULL. */
@@ -528,7 +527,7 @@ bool
page_is_leaf(
/*=========*/
const page_t* page) /*!< in: page */
- MY_ATTRIBUTE((nonnull, pure));
+ MY_ATTRIBUTE((warn_unused_result));
/************************************************************//**
Determine whether the page is empty.
@return true if the page is empty (PAGE_N_RECS = 0) */
@@ -849,8 +848,7 @@ page_copy_rec_list_end(
buf_block_t* block, /*!< in: index page containing rec */
rec_t* rec, /*!< in: record on page */
dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr */
- MY_ATTRIBUTE((nonnull));
+ mtr_t* mtr); /*!< in: mtr */
/*************************************************************//**
Copies records from page to new_page, up to the given record, NOT
including that record. Infimum and supremum records are not copied.
@@ -871,8 +869,7 @@ page_copy_rec_list_start(
buf_block_t* block, /*!< in: index page containing rec */
rec_t* rec, /*!< in: record on page */
dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr */
- MY_ATTRIBUTE((nonnull));
+ mtr_t* mtr); /*!< in: mtr */
/*************************************************************//**
Deletes records from a page from a given record onward, including that record.
The infimum and supremum records are not deleted. */
@@ -921,8 +918,7 @@ page_move_rec_list_end(
buf_block_t* block, /*!< in: index page from where to move */
rec_t* split_rec, /*!< in: first record to move */
dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mtr */
- MY_ATTRIBUTE((nonnull(1, 2, 4, 5)));
+ mtr_t* mtr); /*!< in: mtr */
/*************************************************************//**
Moves record list start to another page. Moved records do not include
split_rec.
@@ -952,8 +948,7 @@ page_dir_split_slot(
page_t* page, /*!< in: index page */
page_zip_des_t* page_zip,/*!< in/out: compressed page whose
uncompressed part will be written, or NULL */
- ulint slot_no)/*!< in: the directory slot */
- MY_ATTRIBUTE((nonnull(1)));
+ ulint slot_no);/*!< in: the directory slot */
/*************************************************************//**
Tries to balance the given directory slot with too few records
with the upper neighbor, so that there are at least the minimum number
@@ -965,8 +960,7 @@ page_dir_balance_slot(
/*==================*/
page_t* page, /*!< in/out: index page */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
- ulint slot_no)/*!< in: the directory slot */
- MY_ATTRIBUTE((nonnull(1)));
+ ulint slot_no);/*!< in: the directory slot */
/**********************************************************//**
Parses a log record of a record list end or start deletion.
@return end of log record or NULL */
diff --git a/storage/xtradb/include/page0page.ic b/storage/xtradb/include/page0page.ic
index 5cf92fd5d8d..364536b86f8 100644
--- a/storage/xtradb/include/page0page.ic
+++ b/storage/xtradb/include/page0page.ic
@@ -156,7 +156,6 @@ page_header_get_offs(
{
ulint offs;
- ut_ad(page);
ut_ad((field == PAGE_FREE)
|| (field == PAGE_LAST_INSERT)
|| (field == PAGE_HEAP_TOP));
diff --git a/storage/xtradb/include/page0zip.h b/storage/xtradb/include/page0zip.h
index 81068e7bd29..adafaa6d8b6 100644
--- a/storage/xtradb/include/page0zip.h
+++ b/storage/xtradb/include/page0zip.h
@@ -132,7 +132,7 @@ page_zip_compress(
dict_index_t* index, /*!< in: index of the B-tree node */
ulint level, /*!< in: compression level */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
- MY_ATTRIBUTE((nonnull(1,2,3)));
+ MY_ATTRIBUTE((warn_unused_result));
/**********************************************************************//**
Decompress a page. This function should tolerate errors on the compressed
@@ -424,8 +424,7 @@ page_zip_reorganize(
out: data, n_blobs,
m_start, m_end, m_nonempty */
dict_index_t* index, /*!< in: index of the B-tree node */
- mtr_t* mtr) /*!< in: mini-transaction */
- MY_ATTRIBUTE((nonnull));
+ mtr_t* mtr); /*!< in: mini-transaction */
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Copy the records of a page byte for byte. Do not copy the page header
@@ -458,7 +457,7 @@ page_zip_parse_compress(
byte* end_ptr,/*!< in: buffer end */
page_t* page, /*!< out: uncompressed page */
page_zip_des_t* page_zip)/*!< out: compressed page */
- MY_ATTRIBUTE((nonnull(1,2)));
+ MY_ATTRIBUTE((warn_unused_result));
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/xtradb/include/rem0rec.h b/storage/xtradb/include/rem0rec.h
index d72f2760a8c..9baf0ab380a 100644
--- a/storage/xtradb/include/rem0rec.h
+++ b/storage/xtradb/include/rem0rec.h
@@ -747,8 +747,7 @@ rec_copy(
/*=====*/
void* buf, /*!< in: buffer */
const rec_t* rec, /*!< in: physical record */
- const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
- MY_ATTRIBUTE((nonnull));
+ const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
#ifndef UNIV_HOTBACKUP
/**********************************************************//**
Determines the size of a data tuple prefix in a temporary file.
diff --git a/storage/xtradb/include/row0upd.h b/storage/xtradb/include/row0upd.h
index e59ec58b63c..4312fcf7339 100644
--- a/storage/xtradb/include/row0upd.h
+++ b/storage/xtradb/include/row0upd.h
@@ -248,9 +248,8 @@ row_upd_index_replace_new_col_vals_index_pos(
/*!< in: if TRUE, limit the replacement to
ordering fields of index; note that this
does not work for non-clustered indexes. */
- mem_heap_t* heap) /*!< in: memory heap for allocating and
+ mem_heap_t* heap); /*!< in: memory heap for allocating and
copying the new values */
- MY_ATTRIBUTE((nonnull));
/***********************************************************//**
Replaces the new column values stored in the update vector to the index entry
given. */
@@ -311,7 +310,7 @@ row_upd_changes_ord_field_binary_func(
compile time */
const row_ext_t*ext) /*!< NULL, or prefixes of the externally
stored columns in the old row */
- MY_ATTRIBUTE((nonnull(1,2), warn_unused_result));
+ MY_ATTRIBUTE((warn_unused_result));
#ifdef UNIV_DEBUG
# define row_upd_changes_ord_field_binary(index,update,thr,row,ext) \
row_upd_changes_ord_field_binary_func(index,update,thr,row,ext)
diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h
index 8713322a008..a2ea6509852 100644
--- a/storage/xtradb/include/srv0srv.h
+++ b/storage/xtradb/include/srv0srv.h
@@ -526,6 +526,7 @@ extern unsigned long long srv_stats_transient_sample_pages;
extern my_bool srv_stats_persistent;
extern unsigned long long srv_stats_persistent_sample_pages;
extern my_bool srv_stats_auto_recalc;
+extern my_bool srv_stats_include_delete_marked;
extern unsigned long long srv_stats_modified_counter;
extern my_bool srv_stats_sample_traditional;
@@ -1321,4 +1322,12 @@ wsrep_srv_conc_cancel_wait(
thread */
#endif /* WITH_WSREP */
+#ifndef DBUG_OFF
+/** false before InnoDB monitor has been printed at least once, true
+afterwards */
+extern bool srv_debug_monitor_printed;
+#else
+#define srv_debug_monitor_printed false
+#endif
+
#endif
diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h
index 24bbff30986..fc710a86d74 100644
--- a/storage/xtradb/include/trx0trx.h
+++ b/storage/xtradb/include/trx0trx.h
@@ -107,7 +107,7 @@ void
trx_free_prepared(
/*==============*/
trx_t* trx) /*!< in, own: trx object */
- UNIV_COLD MY_ATTRIBUTE((nonnull));
+ UNIV_COLD;
/********************************************************************//**
Frees a transaction object for MySQL. */
UNIV_INTERN
diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i
index ad0565a0290..1e375ba2c09 100644
--- a/storage/xtradb/include/univ.i
+++ b/storage/xtradb/include/univ.i
@@ -45,10 +45,10 @@ Created 1/20/1994 Heikki Tuuri
#define INNODB_VERSION_MAJOR 5
#define INNODB_VERSION_MINOR 6
-#define INNODB_VERSION_BUGFIX 34
+#define INNODB_VERSION_BUGFIX 35
#ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 79.1
+#define PERCONA_INNODB_VERSION 80.0
#endif
/* Enable UNIV_LOG_ARCHIVE in XtraDB */
diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc
index e0259ef80a6..4c654f5272d 100644
--- a/storage/xtradb/log/log0log.cc
+++ b/storage/xtradb/log/log0log.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Google Inc.
-Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2014, 2017, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -48,6 +48,10 @@ Created 12/9/1995 Heikki Tuuri
#endif
#ifndef UNIV_HOTBACKUP
+#if MYSQL_VERSION_ID < 100200
+# include <my_systemd.h> /* sd_notifyf() */
+#endif
+
#include "mem0mem.h"
#include "buf0buf.h"
#include "buf0flu.h"
@@ -1876,7 +1880,7 @@ log_preflush_pool_modified_pages(
and we could not make a new checkpoint on the basis of the
info on the buffer pool only. */
- recv_apply_hashed_log_recs(TRUE);
+ recv_apply_hashed_log_recs(true);
}
if (!buf_page_cleaner_is_active
@@ -2251,7 +2255,7 @@ log_checkpoint(
ut_ad(!srv_read_only_mode);
if (recv_recovery_is_on()) {
- recv_apply_hashed_log_recs(TRUE);
+ recv_apply_hashed_log_recs(true);
}
if (srv_unix_file_flush_method != SRV_UNIX_NOSYNC &&
@@ -2625,6 +2629,13 @@ loop:
start_lsn += len;
buf += len;
+ if (recv_sys->report(ut_time())) {
+ ib_logf(IB_LOG_LEVEL_INFO, "Read redo log up to LSN=" LSN_PF,
+ start_lsn);
+ sd_notifyf(0, "STATUS=Read redo log up to LSN=" LSN_PF,
+ start_lsn);
+ }
+
if (start_lsn != end_lsn) {
if (release_mutex) {
diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc
index 4e6ad65a906..74f2e2360a8 100644
--- a/storage/xtradb/log/log0online.cc
+++ b/storage/xtradb/log/log0online.cc
@@ -77,12 +77,14 @@ struct log_bitmap_struct {
both the correct type and the tree does
not mind its overwrite during
rbt_next() tree traversal. */
- ib_mutex_t mutex; /*!< mutex protecting all the fields.*/
};
/* The log parsing and bitmap output struct instance */
static struct log_bitmap_struct* log_bmp_sys;
+/* Mutex protecting log_bmp_sys */
+static ib_mutex_t log_bmp_sys_mutex;
+
/** File name stem for bitmap files. */
static const char* bmp_file_name_stem = "ib_modified_log_";
@@ -174,28 +176,24 @@ log_online_set_page_bit(
ulint space, /*!<in: log record space id */
ulint page_no)/*!<in: log record page id */
{
- ulint block_start_page;
- ulint block_pos;
- uint bit_pos;
- ib_rbt_bound_t tree_search_pos;
- byte search_page[MODIFIED_PAGE_BLOCK_SIZE];
- byte *page_ptr;
-
- ut_ad(mutex_own(&log_bmp_sys->mutex));
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
ut_a(space != ULINT_UNDEFINED);
ut_a(page_no != ULINT_UNDEFINED);
- block_start_page = page_no / MODIFIED_PAGE_BLOCK_ID_COUNT
+ ulint block_start_page = page_no / MODIFIED_PAGE_BLOCK_ID_COUNT
* MODIFIED_PAGE_BLOCK_ID_COUNT;
- block_pos = block_start_page ? (page_no % block_start_page / 8)
+ ulint block_pos = block_start_page ? (page_no % block_start_page / 8)
: (page_no / 8);
- bit_pos = page_no % 8;
+ uint bit_pos = page_no % 8;
+ byte search_page[MODIFIED_PAGE_BLOCK_SIZE];
mach_write_to_4(search_page + MODIFIED_PAGE_SPACE_ID, space);
mach_write_to_4(search_page + MODIFIED_PAGE_1ST_PAGE_ID,
block_start_page);
+ byte *page_ptr;
+ ib_rbt_bound_t tree_search_pos;
if (!rbt_search(log_bmp_sys->modified_pages, &tree_search_pos,
search_page)) {
page_ptr = rbt_value(byte, tree_search_pos.last);
@@ -594,12 +592,19 @@ log_online_is_bitmap_file(
&& (!strcmp(stem, bmp_file_name_stem)));
}
-/*********************************************************************//**
-Initialize the online log following subsytem. */
+/** Initialize the constant part of the log tracking subsystem */
+UNIV_INTERN
+void
+log_online_init(void)
+{
+ mutex_create(log_bmp_sys_mutex_key, &log_bmp_sys_mutex,
+ SYNC_LOG_ONLINE);
+}
+
+/** Initialize the dynamic part of the log tracking subsystem */
UNIV_INTERN
void
log_online_read_init(void)
-/*======================*/
{
ibool success;
lsn_t tracking_start_lsn
@@ -623,9 +628,6 @@ log_online_read_init(void)
log_bmp_sys->read_buf = static_cast<byte *>
(ut_align(log_bmp_sys->read_buf_ptr, OS_FILE_LOG_BLOCK_SIZE));
- mutex_create(log_bmp_sys_mutex_key, &log_bmp_sys->mutex,
- SYNC_LOG_ONLINE);
-
/* Initialize bitmap file directory from srv_data_home and add a path
separator if needed. */
srv_data_home_len = strlen(srv_data_home);
@@ -760,13 +762,15 @@ log_online_read_init(void)
log_set_tracked_lsn(tracking_start_lsn);
}
-/*********************************************************************//**
-Shut down the online log following subsystem. */
+/** Shut down the dynamic part of the log tracking subsystem */
UNIV_INTERN
void
log_online_read_shutdown(void)
-/*==========================*/
{
+ mutex_enter(&log_bmp_sys_mutex);
+
+ srv_track_changed_pages = FALSE;
+
ib_rbt_node_t *free_list_node = log_bmp_sys->page_free_list;
if (log_bmp_sys->out.file != os_file_invalid) {
@@ -782,10 +786,21 @@ log_online_read_shutdown(void)
free_list_node = next;
}
- mutex_free(&log_bmp_sys->mutex);
-
ut_free(log_bmp_sys->read_buf_ptr);
ut_free(log_bmp_sys);
+ log_bmp_sys = NULL;
+
+ srv_redo_log_thread_started = false;
+
+ mutex_exit(&log_bmp_sys_mutex);
+}
+
+/** Shut down the constant part of the log tracking subsystem */
+UNIV_INTERN
+void
+log_online_shutdown(void)
+{
+ mutex_free(&log_bmp_sys_mutex);
}
/*********************************************************************//**
@@ -831,13 +846,12 @@ void
log_online_parse_redo_log(void)
/*===========================*/
{
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
+
byte *ptr = log_bmp_sys->parse_buf;
byte *end = log_bmp_sys->parse_buf_end;
-
ulint len = 0;
- ut_ad(mutex_own(&log_bmp_sys->mutex));
-
while (ptr != end
&& log_bmp_sys->next_parse_lsn < log_bmp_sys->end_lsn) {
@@ -919,6 +933,8 @@ log_online_add_to_parse_buf(
ulint skip_len) /*!< in: how much of log data to
skip */
{
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
+
ulint start_offset = skip_len ? skip_len : LOG_BLOCK_HDR_SIZE;
ulint end_offset
= (data_len == OS_FILE_LOG_BLOCK_SIZE)
@@ -927,8 +943,6 @@ log_online_add_to_parse_buf(
ulint actual_data_len = (end_offset >= start_offset)
? end_offset - start_offset : 0;
- ut_ad(mutex_own(&log_bmp_sys->mutex));
-
ut_memcpy(log_bmp_sys->parse_buf_end, log_block + start_offset,
actual_data_len);
@@ -951,11 +965,9 @@ log_online_parse_redo_log_block(
log data should be skipped as
they were parsed before */
{
- ulint block_data_len;
-
- ut_ad(mutex_own(&log_bmp_sys->mutex));
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
- block_data_len = log_block_get_data_len(log_block);
+ ulint block_data_len = log_block_get_data_len(log_block);
ut_ad(block_data_len % OS_FILE_LOG_BLOCK_SIZE == 0
|| block_data_len < OS_FILE_LOG_BLOCK_SIZE);
@@ -975,14 +987,14 @@ log_online_follow_log_seg(
lsn_t block_start_lsn, /*!< in: the LSN to read from */
lsn_t block_end_lsn) /*!< in: the LSN to read to */
{
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
+
/* Pointer to the current OS_FILE_LOG_BLOCK-sized chunk of the read log
data to parse */
byte* log_block = log_bmp_sys->read_buf;
byte* log_block_end = log_bmp_sys->read_buf
+ (block_end_lsn - block_start_lsn);
- ut_ad(mutex_own(&log_bmp_sys->mutex));
-
mutex_enter(&log_sys->mutex);
log_group_read_log_seg(LOG_RECOVER, log_bmp_sys->read_buf,
group, block_start_lsn, block_end_lsn, TRUE);
@@ -1042,11 +1054,11 @@ log_online_follow_log_group(
lsn_t contiguous_lsn) /*!< in: the LSN of log block start
containing the log_parse_start_lsn */
{
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
+
lsn_t block_start_lsn = contiguous_lsn;
lsn_t block_end_lsn;
- ut_ad(mutex_own(&log_bmp_sys->mutex));
-
log_bmp_sys->next_parse_lsn = log_bmp_sys->start_lsn;
log_bmp_sys->parse_buf_end = log_bmp_sys->parse_buf;
@@ -1083,21 +1095,29 @@ log_online_write_bitmap_page(
/*=========================*/
const byte *block) /*!< in: block to write */
{
- ibool success;
-
- ut_ad(srv_track_changed_pages);
- ut_ad(mutex_own(&log_bmp_sys->mutex));
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
/* Simulate a write error */
DBUG_EXECUTE_IF("bitmap_page_write_error",
- ib_logf(IB_LOG_LEVEL_ERROR,
- "simulating bitmap write error in "
- "log_online_write_bitmap_page");
- return FALSE;);
-
- success = os_file_write(log_bmp_sys->out.name, log_bmp_sys->out.file,
- block, log_bmp_sys->out.offset,
- MODIFIED_PAGE_BLOCK_SIZE);
+ {
+ ulint space_id
+ = mach_read_from_4(block
+ + MODIFIED_PAGE_SPACE_ID);
+ if (space_id > 0) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "simulating bitmap write "
+ "error in "
+ "log_online_write_bitmap_page "
+ "for space ID %lu",
+ space_id);
+ return FALSE;
+ }
+ });
+
+ ibool success = os_file_write(log_bmp_sys->out.name,
+ log_bmp_sys->out.file, block,
+ log_bmp_sys->out.offset,
+ MODIFIED_PAGE_BLOCK_SIZE);
if (UNIV_UNLIKELY(!success)) {
/* The following call prints an error message */
@@ -1136,11 +1156,7 @@ ibool
log_online_write_bitmap(void)
/*=========================*/
{
- ib_rbt_node_t *bmp_tree_node;
- const ib_rbt_node_t *last_bmp_tree_node;
- ibool success = TRUE;
-
- ut_ad(mutex_own(&log_bmp_sys->mutex));
+ ut_ad(mutex_own(&log_bmp_sys_mutex));
if (log_bmp_sys->out.offset >= srv_max_bitmap_file_size) {
if (!log_online_rotate_bitmap_file(log_bmp_sys->start_lsn)) {
@@ -1148,9 +1164,12 @@ log_online_write_bitmap(void)
}
}
- bmp_tree_node = (ib_rbt_node_t *)
- rbt_first(log_bmp_sys->modified_pages);
- last_bmp_tree_node = rbt_last(log_bmp_sys->modified_pages);
+ ib_rbt_node_t *bmp_tree_node
+ = (ib_rbt_node_t *)rbt_first(log_bmp_sys->modified_pages);
+ const ib_rbt_node_t * const last_bmp_tree_node
+ = rbt_last(log_bmp_sys->modified_pages);
+
+ ibool success = TRUE;
while (bmp_tree_node) {
@@ -1183,9 +1202,11 @@ log_online_write_bitmap(void)
rbt_next(log_bmp_sys->modified_pages, bmp_tree_node);
DBUG_EXECUTE_IF("bitmap_page_2_write_error",
- ut_ad(bmp_tree_node); /* 2nd page must exist */
- DBUG_SET("+d,bitmap_page_write_error");
- DBUG_SET("-d,bitmap_page_2_write_error"););
+ if (bmp_tree_node)
+ {
+ DBUG_SET("+d,bitmap_page_write_error");
+ DBUG_SET("-d,bitmap_page_2_write_error");
+ });
}
rbt_reset(log_bmp_sys->modified_pages);
@@ -1206,10 +1227,19 @@ log_online_follow_redo_log(void)
log_group_t* group;
ibool result;
- ut_ad(srv_track_changed_pages);
ut_ad(!srv_read_only_mode);
- mutex_enter(&log_bmp_sys->mutex);
+ if (!srv_track_changed_pages)
+ return TRUE;
+
+ DEBUG_SYNC_C("log_online_follow_redo_log");
+
+ mutex_enter(&log_bmp_sys_mutex);
+
+ if (!srv_track_changed_pages) {
+ mutex_exit(&log_bmp_sys_mutex);
+ return TRUE;
+ }
/* Grab the LSN of the last checkpoint, we will parse up to it */
mutex_enter(&(log_sys->mutex));
@@ -1217,7 +1247,7 @@ log_online_follow_redo_log(void)
mutex_exit(&(log_sys->mutex));
if (log_bmp_sys->end_lsn == log_bmp_sys->start_lsn) {
- mutex_exit(&log_bmp_sys->mutex);
+ mutex_exit(&log_bmp_sys_mutex);
return TRUE;
}
@@ -1240,7 +1270,7 @@ log_online_follow_redo_log(void)
log_bmp_sys->start_lsn = log_bmp_sys->end_lsn;
log_set_tracked_lsn(log_bmp_sys->start_lsn);
- mutex_exit(&log_bmp_sys->mutex);
+ mutex_exit(&log_bmp_sys_mutex);
return result;
}
@@ -1587,6 +1617,8 @@ log_online_bitmap_iterator_init(
{
ut_a(i);
+ i->max_lsn = max_lsn;
+
if (UNIV_UNLIKELY(min_lsn > max_lsn)) {
/* Empty range */
@@ -1695,6 +1727,9 @@ log_online_bitmap_iterator_next(
return TRUE;
}
+ if (i->end_lsn >= i->max_lsn && i->last_page_in_run)
+ return FALSE;
+
while (!checksum_ok)
{
while (i->in.size < MODIFIED_PAGE_BLOCK_SIZE
@@ -1790,15 +1825,21 @@ log_online_purge_changed_page_bitmaps(
lsn = LSN_MAX;
}
+ bool log_bmp_sys_inited = false;
if (srv_redo_log_thread_started) {
/* User requests might happen with both enabled and disabled
tracking */
- mutex_enter(&log_bmp_sys->mutex);
+ log_bmp_sys_inited = true;
+ mutex_enter(&log_bmp_sys_mutex);
+ if (!srv_redo_log_thread_started) {
+ log_bmp_sys_inited = false;
+ mutex_exit(&log_bmp_sys_mutex);
+ }
}
if (!log_online_setup_bitmap_file_range(&bitmap_files, 0, LSN_MAX)) {
- if (srv_redo_log_thread_started) {
- mutex_exit(&log_bmp_sys->mutex);
+ if (log_bmp_sys_inited) {
+ mutex_exit(&log_bmp_sys_mutex);
}
return TRUE;
}
@@ -1836,7 +1877,7 @@ log_online_purge_changed_page_bitmaps(
}
}
- if (srv_redo_log_thread_started) {
+ if (log_bmp_sys_inited) {
if (lsn > log_bmp_sys->end_lsn) {
lsn_t new_file_lsn;
if (lsn == LSN_MAX) {
@@ -1852,7 +1893,7 @@ log_online_purge_changed_page_bitmaps(
}
}
- mutex_exit(&log_bmp_sys->mutex);
+ mutex_exit(&log_bmp_sys_mutex);
}
free(bitmap_files.files);
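
The log0online.cc hunks above move the mutex out of the heap-allocated log_bmp_sys struct into a file-static log_bmp_sys_mutex with its own log_online_init()/log_online_shutdown() lifetime, and callers such as log_online_follow_redo_log() now re-check srv_track_changed_pages after acquiring it. A stand-alone C++ sketch of that general pattern; the subsys names and std::mutex locking are illustrative stand-ins, not InnoDB primitives:

// subsys_mutex_sketch.cc -- a static mutex guarding a heap-allocated subsystem.
// Illustrative only; names and std::mutex are stand-ins for InnoDB primitives.
#include <mutex>
#include <cstdio>

struct subsys { int work_done = 0; };

static std::mutex  subsys_mutex;        // outlives the object it protects
static subsys*     subsys_ptr = nullptr;
static bool        subsys_enabled = false;

void subsys_start()                      // cf. log_online_init/read_init
{
    std::lock_guard<std::mutex> g(subsys_mutex);
    subsys_ptr = new subsys();
    subsys_enabled = true;
}

void subsys_stop()                       // cf. log_online_read_shutdown
{
    std::lock_guard<std::mutex> g(subsys_mutex);
    subsys_enabled = false;              // flipped under the same mutex
    delete subsys_ptr;
    subsys_ptr = nullptr;
}

// Concurrent request: re-check the enabled flag after taking the mutex,
// like log_online_follow_redo_log() re-checks srv_track_changed_pages.
bool subsys_do_work()
{
    if (!subsys_enabled) {
        return true;                     // cheap unlocked early-out
    }
    std::lock_guard<std::mutex> g(subsys_mutex);
    if (!subsys_enabled) {
        return true;                     // shut down while we waited
    }
    subsys_ptr->work_done++;
    return true;
}

int main()
{
    subsys_start();
    subsys_do_work();
    subsys_stop();
    std::printf("ok\n");
    return 0;
}

Because the mutex no longer lives inside the object being freed, shutdown can safely run while a purge or follow request is waiting on the lock.
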
diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc
index 6405a87a2bd..a20cb8dec4d 100644
--- a/storage/xtradb/log/log0recv.cc
+++ b/storage/xtradb/log/log0recv.cc
@@ -2,7 +2,7 @@
Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2013, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2013, 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -85,7 +85,7 @@ this must be less than UNIV_PAGE_SIZE as it is stored in the buffer pool */
#define RECV_READ_AHEAD_AREA 32
/** The recovery system */
-UNIV_INTERN recv_sys_t* recv_sys = NULL;
+UNIV_INTERN recv_sys_t* recv_sys;
/** TRUE when applying redo log records during crash recovery; FALSE
otherwise. Note that this is FALSE while a background thread is
rolling back incomplete transactions. */
@@ -137,9 +137,6 @@ UNIV_INTERN ibool recv_is_making_a_backup = FALSE;
UNIV_INTERN ibool recv_is_from_backup = FALSE;
# define buf_pool_get_curr_size() (5 * 1024 * 1024)
#endif /* !UNIV_HOTBACKUP */
-/** The following counter is used to decide when to print info on
-log scan */
-static ulint recv_scan_print_counter;
/** The type of the previous parsed redo log record */
static ulint recv_previous_parsed_rec_type;
@@ -310,8 +307,6 @@ recv_sys_var_init(void)
recv_no_ibuf_operations = FALSE;
- recv_scan_print_counter = 0;
-
recv_previous_parsed_rec_type = 999999;
recv_previous_parsed_rec_offset = 0;
@@ -422,6 +417,7 @@ recv_sys_init(
recv_sys->last_block_buf_start, OS_FILE_LOG_BLOCK_SIZE));
recv_sys->found_corrupt_log = FALSE;
+ recv_sys->progress_time = ut_time();
recv_max_page_lsn = 0;
@@ -431,33 +427,18 @@ recv_sys_init(
mutex_exit(&(recv_sys->mutex));
}
-/********************************************************//**
-Empties the hash table when it has been fully processed.
-@return DB_SUCCESS when successfull or DB_ERROR when fails. */
+/** Empty a fully processed hash table. */
static
-dberr_t
-recv_sys_empty_hash(void)
-/*=====================*/
+void
+recv_sys_empty_hash()
{
ut_ad(mutex_own(&(recv_sys->mutex)));
-
- if (recv_sys->n_addrs != 0) {
- fprintf(stderr,
- "InnoDB: Error: %lu pages with log records"
- " were left unprocessed!\n"
- "InnoDB: Maximum page number with"
- " log records on it %lu\n",
- (ulong) recv_sys->n_addrs,
- (ulong) recv_max_parsed_page_no);
- return DB_ERROR;
- }
+ ut_a(recv_sys->n_addrs == 0);
hash_table_free(recv_sys->addr_hash);
mem_heap_empty(recv_sys->heap);
recv_sys->addr_hash = hash_create(buf_pool_get_curr_size() / 512);
-
- return DB_SUCCESS;
}
#ifndef UNIV_HOTBACKUP
@@ -1804,6 +1785,8 @@ recv_recover_page_func(
mtr_commit(&mtr);
+ ib_time_t time = ut_time();
+
mutex_enter(&(recv_sys->mutex));
if (recv_max_page_lsn < page_lsn) {
@@ -1812,11 +1795,17 @@ recv_recover_page_func(
recv_addr->state = RECV_PROCESSED;
- ut_a(recv_sys->n_addrs);
- recv_sys->n_addrs--;
-
- mutex_exit(&(recv_sys->mutex));
+ ut_a(recv_sys->n_addrs > 0);
+ if (ulint n = --recv_sys->n_addrs) {
+ if (recv_sys->report(time)) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "To recover: " ULINTPF " pages from log", n);
+ sd_notifyf(0, "STATUS=To recover: " ULINTPF
+ " pages from log", n);
+ }
+ }
+ mutex_exit(&recv_sys->mutex);
}
#ifndef UNIV_HOTBACKUP
@@ -1862,62 +1851,50 @@ recv_read_in_area(
}
buf_read_recv_pages(FALSE, space, zip_size, page_nos, n);
- /*
- fprintf(stderr, "Recv pages at %lu n %lu\n", page_nos[0], n);
- */
return(n);
}
-/*******************************************************************//**
-Empties the hash table of stored log records, applying them to appropriate
-pages.
-@return DB_SUCCESS when successfull or DB_ERROR when fails. */
+/** Apply the hash table of stored log records to persistent data pages.
+@param[in] last_batch whether the change buffer merge will be
+ performed as part of the operation */
UNIV_INTERN
-dberr_t
-recv_apply_hashed_log_recs(
-/*=======================*/
- ibool allow_ibuf) /*!< in: if TRUE, also ibuf operations are
- allowed during the application; if FALSE,
- no ibuf operations are allowed, and after
- the application all file pages are flushed to
- disk and invalidated in buffer pool: this
- alternative means that no new log records
- can be generated during the application;
- the caller must in this case own the log
- mutex */
+void
+recv_apply_hashed_log_recs(bool last_batch)
{
- recv_addr_t* recv_addr;
- ulint i;
- ibool has_printed = FALSE;
- ulong progress;
- mtr_t mtr;
- dberr_t err = DB_SUCCESS;
-loop:
- mutex_enter(&(recv_sys->mutex));
-
- if (recv_sys->apply_batch_on) {
+ for (;;) {
+ mutex_enter(&recv_sys->mutex);
- mutex_exit(&(recv_sys->mutex));
+ if (!recv_sys->apply_batch_on) {
+ break;
+ }
+ mutex_exit(&recv_sys->mutex);
os_thread_sleep(500000);
-
- goto loop;
}
- ut_ad((allow_ibuf == 0) == (mutex_own(&log_sys->mutex) != 0));
+ ut_ad(!last_batch == mutex_own(&log_sys->mutex));
- if (!allow_ibuf) {
+ if (!last_batch) {
recv_no_ibuf_operations = TRUE;
}
+ if (ulint n = recv_sys->n_addrs) {
+ const char* msg = last_batch
+ ? "Starting final batch to recover "
+ : "Starting a batch to recover ";
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "%s" ULINTPF " pages from redo log", msg, n);
+ sd_notifyf(0, "STATUS=%s" ULINTPF " pages from redo log",
+ msg, n);
+ }
+
recv_sys->apply_log_recs = TRUE;
recv_sys->apply_batch_on = TRUE;
- for (i = 0; i < hash_get_n_cells(recv_sys->addr_hash); i++) {
-
- for (recv_addr = static_cast<recv_addr_t*>(
- HASH_GET_FIRST(recv_sys->addr_hash, i));
- recv_addr != 0;
+ for (ulint i = 0; i < hash_get_n_cells(recv_sys->addr_hash); i++) {
+ for (recv_addr_t* recv_addr = static_cast<recv_addr_t*>(
+ HASH_GET_FIRST(recv_sys->addr_hash, i));
+ recv_addr;
recv_addr = static_cast<recv_addr_t*>(
HASH_GET_NEXT(addr_hash, recv_addr))) {
@@ -1926,24 +1903,12 @@ loop:
ulint page_no = recv_addr->page_no;
if (recv_addr->state == RECV_NOT_PROCESSED) {
- if (!has_printed) {
- ib_logf(IB_LOG_LEVEL_INFO,
- "Starting an apply batch"
- " of log records"
- " to the database...");
- fputs("InnoDB: Progress in percent: ",
- stderr);
- has_printed = TRUE;
- }
-
- mutex_exit(&(recv_sys->mutex));
+ mutex_exit(&recv_sys->mutex);
if (buf_page_peek(space, page_no)) {
- buf_block_t* block;
-
+ mtr_t mtr;
mtr_start(&mtr);
-
- block = buf_page_get(
+ buf_block_t* block = buf_page_get(
space, zip_size, page_no,
RW_X_LATCH, &mtr);
buf_block_dbg_add_level(
@@ -1956,21 +1921,9 @@ loop:
page_no);
}
- mutex_enter(&(recv_sys->mutex));
+ mutex_enter(&recv_sys->mutex);
}
}
-
- progress = (ulong) (i * 100)
- / hash_get_n_cells(recv_sys->addr_hash);
- if (has_printed
- && progress
- != ((i + 1) * 100)
- / hash_get_n_cells(recv_sys->addr_hash)) {
-
- fprintf(stderr, "%lu ", progress);
- sd_notifyf(0, "STATUS=Applying batch of log records for"
- " InnoDB: Progress %lu", progress);
- }
}
/* Wait until all the pages have been processed */
@@ -1984,12 +1937,7 @@ loop:
mutex_enter(&(recv_sys->mutex));
}
- if (has_printed) {
-
- fprintf(stderr, "\n");
- }
-
- if (!allow_ibuf) {
+ if (!last_batch) {
bool success;
/* Flush all the file pages to disk and invalidate them in
@@ -2027,16 +1975,9 @@ loop:
recv_sys->apply_log_recs = FALSE;
recv_sys->apply_batch_on = FALSE;
- err = recv_sys_empty_hash();
-
- if (has_printed) {
- fprintf(stderr, "InnoDB: Apply batch completed\n");
- sd_notify(0, "STATUS=InnoDB: Apply batch completed");
- }
-
- mutex_exit(&(recv_sys->mutex));
+ recv_sys_empty_hash();
- return err;
+ mutex_exit(&recv_sys->mutex);
}
#else /* !UNIV_HOTBACKUP */
/*******************************************************************//**
@@ -2059,11 +2000,6 @@ recv_apply_log_recs_for_backup(void)
block = back_block1;
- ib_logf(IB_LOG_LEVEL_INFO,
- "Starting an apply batch of log records to the database...");
-
- fputs("InnoDB: Progress in percent: ", stderr);
-
n_hash_cells = hash_get_n_cells(recv_sys->addr_hash);
for (i = 0; i < n_hash_cells; i++) {
@@ -2177,16 +2113,6 @@ recv_apply_log_recs_for_backup(void)
skip_this_recv_addr:
recv_addr = HASH_GET_NEXT(addr_hash, recv_addr);
}
-
- if ((100 * i) / n_hash_cells
- != (100 * (i + 1)) / n_hash_cells) {
- fprintf(stderr, "%lu ",
- (ulong) ((100 * i) / n_hash_cells));
- fflush(stderr);
- sd_notifyf(0, "STATUS=Applying batch of log records for"
- " backup InnoDB: Progress %lu",
- (ulong) (100 * i) / n_hash_cells);
- }
}
sd_notify(0, "STATUS=InnoDB: Apply batch for backup completed");
@@ -2889,11 +2815,10 @@ recv_scan_log_recs(
#ifndef UNIV_HOTBACKUP
if (recv_log_scan_is_startup_type
&& !recv_needed_recovery) {
-
if (!srv_read_only_mode) {
ib_logf(IB_LOG_LEVEL_INFO,
- "Log scan progressed past the "
- "checkpoint lsn " LSN_PF "",
+ "Starting crash recovery from "
+ "checkpoint LSN=" LSN_PF,
recv_sys->scanned_lsn);
recv_init_crash_recovery();
@@ -2953,19 +2878,6 @@ recv_scan_log_recs(
*group_scanned_lsn = scanned_lsn;
- if (recv_needed_recovery
- || (recv_is_from_backup && !recv_is_making_a_backup)) {
- recv_scan_print_counter++;
-
- if (finished || (recv_scan_print_counter % 80 == 0)) {
-
- fprintf(stderr,
- "InnoDB: Doing recovery: scanned up to"
- " log sequence number " LSN_PF "\n",
- *group_scanned_lsn);
- }
- }
-
if (more_data && !recv_sys->found_corrupt_log) {
/* Try to parse more log records */
@@ -2985,12 +2897,7 @@ recv_scan_log_recs(
log yet: they would be produced by ibuf
operations */
- *err = recv_apply_hashed_log_recs(FALSE);
-
- if (*err != DB_SUCCESS) {
- /* Finish processing because of error */
- return (TRUE);
- }
+ recv_apply_hashed_log_recs(false);
}
#endif /* !UNIV_HOTBACKUP */
@@ -3074,11 +2981,6 @@ recv_init_crash_recovery(void)
recv_needed_recovery = TRUE;
- ib_logf(IB_LOG_LEVEL_INFO, "Database was not shutdown normally!");
- ib_logf(IB_LOG_LEVEL_INFO, "Starting crash recovery.");
- ib_logf(IB_LOG_LEVEL_INFO,
- "Reading tablespace information from the .ibd files...");
-
fil_load_single_table_tablespaces();
/* If we are using the doublewrite method, we will
@@ -3089,9 +2991,7 @@ recv_init_crash_recovery(void)
if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
ib_logf(IB_LOG_LEVEL_INFO,
- "Restoring possible half-written data pages ");
-
- ib_logf(IB_LOG_LEVEL_INFO,
+ "Restoring possible half-written data pages "
"from the doublewrite buffer...");
buf_dblwr_process();
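
The log0recv.cc hunks change recv_apply_hashed_log_recs() to return void and to report progress through ib_logf() and sd_notifyf() only when recv_sys->report() says enough time has passed, instead of printing a percentage counter to stderr. A hypothetical C++ sketch of such a time-throttled reporter; the struct name and the 15-second interval are assumptions for illustration:

// progress_sketch.cc -- rate-limited recovery progress reporting.
// Illustrative only; the struct and 15-second interval are assumptions.
#include <cstdio>
#include <ctime>

struct recovery_progress {
    std::time_t last_report = 0;

    // Return true at most once per interval, mirroring the idea behind
    // the recv_sys->report(ut_time()) calls in the hunks above.
    bool report(std::time_t now, int interval_sec = 15)
    {
        if (now - last_report < interval_sec) {
            return false;
        }
        last_report = now;
        return true;
    }
};

int main()
{
    recovery_progress progress;
    unsigned long pages_left = 100000;

    while (pages_left > 0) {
        --pages_left;                    // stand-in for applying one page
        if (progress.report(std::time(nullptr))) {
            std::printf("To recover: %lu pages from log\n", pages_left);
        }
    }
    return 0;
}
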
diff --git a/storage/xtradb/mach/mach0data.cc b/storage/xtradb/mach/mach0data.cc
index 206434dc5ab..feeedb01609 100644
--- a/storage/xtradb/mach/mach0data.cc
+++ b/storage/xtradb/mach/mach0data.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -55,7 +55,6 @@ mach_parse_compressed(
if (flag < 0x80UL) {
*val = flag;
return(ptr + 1);
-
}
/* Workaround GCC bug
@@ -64,7 +63,11 @@ mach_parse_compressed(
function, causing and out-of-bounds read if we are reading a short
integer close to the end of buffer. */
#if defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__clang__)
- asm volatile("": : :"memory");
+#define DEPLOY_FENCE
+#endif
+
+#ifdef DEPLOY_FENCE
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
#endif
if (flag < 0xC0UL) {
@@ -75,8 +78,13 @@ mach_parse_compressed(
*val = mach_read_from_2(ptr) & 0x7FFFUL;
return(ptr + 2);
+ }
- } else if (flag < 0xE0UL) {
+#ifdef DEPLOY_FENCE
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#endif
+
+ if (flag < 0xE0UL) {
if (end_ptr < ptr + 3) {
return(NULL);
}
@@ -84,7 +92,13 @@ mach_parse_compressed(
*val = mach_read_from_3(ptr) & 0x3FFFFFUL;
return(ptr + 3);
- } else if (flag < 0xF0UL) {
+ }
+
+#ifdef DEPLOY_FENCE
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#endif
+
+ if (flag < 0xF0UL) {
if (end_ptr < ptr + 4) {
return(NULL);
}
@@ -92,14 +106,20 @@ mach_parse_compressed(
*val = mach_read_from_4(ptr) & 0x1FFFFFFFUL;
return(ptr + 4);
- } else {
- ut_ad(flag == 0xF0UL);
+ }
- if (end_ptr < ptr + 5) {
- return(NULL);
- }
+#ifdef DEPLOY_FENCE
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#endif
- *val = mach_read_from_4(ptr + 1);
- return(ptr + 5);
+#undef DEPLOY_FENCE
+
+ ut_ad(flag == 0xF0UL);
+
+ if (end_ptr < ptr + 5) {
+ return(NULL);
}
+
+ *val = mach_read_from_4(ptr + 1);
+ return(ptr + 5);
}
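
The mach0data.cc hunk keeps the workaround for the GCC out-of-bounds-read bug but replaces the single asm volatile compiler barrier with __atomic_thread_fence(__ATOMIC_ACQUIRE) fences placed between the successive length checks. A stripped-down C++ sketch of the same shape, using a hypothetical two-byte decoder rather than the real mach_parse_compressed() logic:

// fence_sketch.cc -- keep a bounds check ordered before a wider read.
// Illustrative only; this is not the mach_parse_compressed() decoder.
#include <cstdint>
#include <cstdio>

static uint32_t read_2(const unsigned char* p)      // big-endian 2-byte read
{
    return (uint32_t(p[0]) << 8) | uint32_t(p[1]);
}

// Decode a length-prefixed value without reading past end_ptr.  The fence
// is only a workaround for the GCC versions named in the patch; otherwise
// it is at most a cheap ordering constraint on the optimizer.
static const unsigned char* parse(const unsigned char* ptr,
                                  const unsigned char* end_ptr,
                                  uint32_t* val)
{
    if (end_ptr < ptr + 1) {
        return nullptr;
    }
    const unsigned flag = *ptr;

    if (flag < 0x80) {
        *val = flag;
        return ptr + 1;
    }

#if defined(__GNUC__)
    __atomic_thread_fence(__ATOMIC_ACQUIRE);         // stop hoisting read_2()
#endif

    if (end_ptr < ptr + 2) {                         // checked before reading
        return nullptr;
    }
    *val = read_2(ptr) & 0x7FFFU;
    return ptr + 2;
}

int main()
{
    const unsigned char buf[] = { 0x81, 0x23 };
    uint32_t v = 0;
    const unsigned char* next = parse(buf, buf + sizeof buf, &v);
    std::printf("value=%u consumed=%td\n",
                static_cast<unsigned>(v), next ? next - buf : -1);
    return 0;
}
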
diff --git a/storage/xtradb/mtr/mtr0mtr.cc b/storage/xtradb/mtr/mtr0mtr.cc
index a1d7261e43c..e564b270d00 100644
--- a/storage/xtradb/mtr/mtr0mtr.cc
+++ b/storage/xtradb/mtr/mtr0mtr.cc
@@ -312,7 +312,6 @@ mtr_commit(
/*=======*/
mtr_t* mtr) /*!< in: mini-transaction */
{
- ut_ad(mtr);
ut_ad(mtr->magic_n == MTR_MAGIC_N);
ut_ad(mtr->state == MTR_ACTIVE);
ut_ad(!mtr->inside_ibuf);
diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc
index c5be6d45c0e..cdc3df5e851 100644
--- a/storage/xtradb/os/os0file.cc
+++ b/storage/xtradb/os/os0file.cc
@@ -1234,50 +1234,15 @@ next_file:
char* full_path;
int ret;
struct stat statinfo;
-#ifdef HAVE_READDIR_R
- char dirent_buf[sizeof(struct dirent)
- + _POSIX_PATH_MAX + 100];
- /* In /mysys/my_lib.c, _POSIX_PATH_MAX + 1 is used as
- the max file name len; but in most standards, the
- length is NAME_MAX; we add 100 to be even safer */
-#endif
next_file:
-#ifdef HAVE_READDIR_R
- ret = readdir_r(dir, (struct dirent*) dirent_buf, &ent);
-
- if (ret != 0
-#ifdef UNIV_AIX
- /* On AIX, only if we got non-NULL 'ent' (result) value and
- a non-zero 'ret' (return) value, it indicates a failed
- readdir_r() call. An NULL 'ent' with an non-zero 'ret'
- would indicate the "end of the directory" is reached. */
- && ent != NULL
-#endif
- ) {
- fprintf(stderr,
- "InnoDB: cannot read directory %s, error %lu\n",
- dirname, (ulong) ret);
-
- return(-1);
- }
-
- if (ent == NULL) {
- /* End of directory */
-
- return(1);
- }
-
- ut_a(strlen(ent->d_name) < _POSIX_PATH_MAX + 100 - 1);
-#else
ent = readdir(dir);
if (ent == NULL) {
return(1);
}
-#endif
ut_a(strlen(ent->d_name) < OS_FILE_MAX_PATH);
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0) {
@@ -1605,9 +1570,13 @@ os_file_set_nocache_if_needed(os_file_t file, const char* name,
if (srv_unix_file_flush_method == SRV_UNIX_ALL_O_DIRECT
|| (type == OS_DATA_FILE
&& (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT
- || (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT_NO_FSYNC)))) {
- os_file_set_nocache(file, name, mode_str);
- }
+ || (srv_unix_file_flush_method
+ == SRV_UNIX_O_DIRECT_NO_FSYNC))))
+ /* Do fsync() on log files when setting O_DIRECT fails.
+ See log_io_complete() */
+ if (!os_file_set_nocache(file, name, mode_str)
+ && srv_unix_file_flush_method == SRV_UNIX_ALL_O_DIRECT)
+ srv_unix_file_flush_method = SRV_UNIX_O_DIRECT;
}
/****************************************************************//**
@@ -1815,9 +1784,10 @@ os_file_create_simple_no_error_handling_func(
}
/****************************************************************//**
-Tries to disable OS caching on an opened file descriptor. */
+Tries to disable OS caching on an opened file descriptor.
+@return TRUE if operation is success and FALSE otherwise */
UNIV_INTERN
-void
+bool
os_file_set_nocache(
/*================*/
os_file_t fd /*!< in: file descriptor to alter */
@@ -1838,6 +1808,7 @@ os_file_set_nocache(
"Failed to set DIRECTIO_ON on file %s: %s: %s, "
"continuing anyway.",
file_name, operation_name, strerror(errno_save));
+ return false;
}
#elif defined(O_DIRECT)
if (fcntl(fd, F_SETFL, O_DIRECT) == -1) {
@@ -1868,8 +1839,10 @@ short_warning:
"continuing anyway.",
file_name, operation_name, strerror(errno_save));
}
+ return false;
}
#endif /* defined(UNIV_SOLARIS) && defined(DIRECTIO_ON) */
+ return true;
}
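
With os_file_set_nocache() now returning bool, os_file_set_nocache_if_needed() can downgrade SRV_UNIX_ALL_O_DIRECT to SRV_UNIX_O_DIRECT when O_DIRECT cannot be enabled, so redo log writes fall back to fsync(). A small self-contained C++ sketch of that caller-side pattern; the enum values and helper names are stand-ins, and only the fcntl(F_SETFL, O_DIRECT) call mirrors the patch:

// nocache_sketch.cc -- downgrade a flush method when O_DIRECT cannot be set.
// Illustrative only; the enum and globals are assumptions, not server code.
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

enum flush_method { FLUSH_ALL_O_DIRECT, FLUSH_O_DIRECT, FLUSH_FSYNC };

static flush_method flush = FLUSH_ALL_O_DIRECT;

// Try to disable OS caching; report success instead of returning void,
// which is the shape of the os_file_set_nocache() change above.
static bool set_nocache(int fd)
{
#ifdef O_DIRECT
    return fcntl(fd, F_SETFL, O_DIRECT) != -1;
#else
    (void) fd;
    return false;
#endif
}

static void set_nocache_if_needed(int fd)
{
    if (!set_nocache(fd) && flush == FLUSH_ALL_O_DIRECT) {
        // Could not get O_DIRECT: keep data files direct but let the
        // redo log fall back to fsync(), as the patched caller does.
        flush = FLUSH_O_DIRECT;
    }
}

int main()
{
    int fd = open("/tmp/nocache_sketch.tmp", O_CREAT | O_RDWR, 0600);
    if (fd >= 0) {
        set_nocache_if_needed(fd);
        close(fd);
    }
    std::printf("flush method now %d\n", static_cast<int>(flush));
    return 0;
}

Returning the success flag instead of silently logging the failure is what lets the caller adjust the global flush strategy in one place.
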
diff --git a/storage/xtradb/os/os0thread.cc b/storage/xtradb/os/os0thread.cc
index 5ddc40b0eeb..8baf06b9bb7 100644
--- a/storage/xtradb/os/os0thread.cc
+++ b/storage/xtradb/os/os0thread.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -206,29 +206,32 @@ os_thread_create_func(
#endif
}
-/**
-Waits until the specified thread completes and joins it. Its return value is
-ignored.
-
-@param thread thread to join */
+/** Waits until the specified thread completes and joins it.
+Its return value is ignored.
+@param[in,out] thread thread to join */
UNIV_INTERN
void
os_thread_join(
os_thread_t thread)
{
- /*This function is currently only used to workaround glibc bug
+ /* This function is currently only used to workaround glibc bug
described in http://bugs.mysql.com/bug.php?id=82886
On Windows, no workarounds are necessary, all threads
are "detached" upon thread exit (handle is closed), so we do
nothing.
*/
-#ifndef _WIN32
- int ret MY_ATTRIBUTE((unused)) = pthread_join(thread, NULL);
+#ifdef __WIN__
+ /* Do nothing. */
+#else
+#ifdef UNIV_DEBUG
+ const int ret MY_ATTRIBUTE((unused)) =
+#endif /* UNIV_DEBUG */
+ pthread_join(thread, NULL);
- /* Waiting on already-quit threads is allowed */
+ /* Waiting on already-quit threads is allowed. */
ut_ad(ret == 0 || ret == ESRCH);
-#endif
+#endif /* __WIN__ */
}
/*****************************************************************//**
@@ -257,8 +260,9 @@ os_thread_exit(
#ifdef __WIN__
ExitThread((DWORD) exit_value);
#else
- if (detach)
+ if (detach) {
pthread_detach(pthread_self());
+ }
pthread_exit(exit_value);
#endif
}
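
The os0thread.cc hunk keeps pthread_join()'s return value only in UNIV_DEBUG builds, where it feeds an assertion that tolerates ESRCH for already-quit threads. A stand-alone C++ sketch of that conditional-consumption pattern, using NDEBUG as a stand-in for the UNIV_DEBUG guard:

// join_sketch.cc -- consume pthread_join()'s result only in debug builds.
// Illustrative only; NDEBUG stands in for the UNIV_DEBUG guard in the patch.
#include <pthread.h>
#include <cassert>
#include <cerrno>
#include <cstdio>

static void* worker(void*) { return nullptr; }

static void join_ignoring_result(pthread_t t)
{
#ifndef NDEBUG
    const int ret =
#endif
        pthread_join(t, nullptr);
#ifndef NDEBUG
    // Joining a thread that already quit is acceptable, as in the patch.
    assert(ret == 0 || ret == ESRCH);
#endif
}

int main()
{
    pthread_t t;
    if (pthread_create(&t, nullptr, worker, nullptr) != 0) {
        return 1;
    }
    join_ignoring_result(t);
    std::printf("joined\n");
    return 0;
}
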
diff --git a/storage/xtradb/page/page0page.cc b/storage/xtradb/page/page0page.cc
index a6fba4074ef..3f8e47adafd 100644
--- a/storage/xtradb/page/page0page.cc
+++ b/storage/xtradb/page/page0page.cc
@@ -1455,7 +1455,6 @@ page_dir_split_slot(
ulint i;
ulint n_owned;
- ut_ad(page);
ut_ad(!page_zip || page_is_comp(page));
ut_ad(slot_no > 0);
@@ -1517,7 +1516,6 @@ page_dir_balance_slot(
rec_t* old_rec;
rec_t* new_rec;
- ut_ad(page);
ut_ad(!page_zip || page_is_comp(page));
ut_ad(slot_no > 0);
diff --git a/storage/xtradb/page/page0zip.cc b/storage/xtradb/page/page0zip.cc
index 04340c0f3d2..32e76fb44e6 100644
--- a/storage/xtradb/page/page0zip.cc
+++ b/storage/xtradb/page/page0zip.cc
@@ -4810,8 +4810,6 @@ page_zip_parse_compress(
ulint size;
ulint trailer_size;
- ut_ad(ptr != NULL);
- ut_ad(end_ptr != NULL);
ut_ad(!page == !page_zip);
if (UNIV_UNLIKELY(ptr + (2 + 2) > end_ptr)) {
diff --git a/storage/xtradb/rem/rem0rec.cc b/storage/xtradb/rem/rem0rec.cc
index b9496b7f620..6770748c38b 100644
--- a/storage/xtradb/rem/rem0rec.cc
+++ b/storage/xtradb/rem/rem0rec.cc
@@ -789,7 +789,7 @@ rec_get_nth_field_offs_old(
/**********************************************************//**
Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
@return total size */
-UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(1,2)))
+UNIV_INLINE MY_ATTRIBUTE((warn_unused_result))
ulint
rec_get_converted_size_comp_prefix_low(
/*===================================*/
diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc
index 2a4805a4714..7bf5c98ee5f 100644
--- a/storage/xtradb/row/row0merge.cc
+++ b/storage/xtradb/row/row0merge.cc
@@ -1073,14 +1073,8 @@ row_merge_read_rec(
ulint data_size;
ulint avail_size;
- ut_ad(block);
- ut_ad(buf);
ut_ad(b >= &block[0]);
ut_ad(b < &block[srv_sort_buf_size]);
- ut_ad(index);
- ut_ad(foffs);
- ut_ad(mrec);
- ut_ad(offsets);
ut_ad(*offsets == 1 + REC_OFFS_HEADER_SIZE
+ dict_index_get_n_fields(index));
@@ -4172,8 +4166,8 @@ wait_again:
for (j = 0; j < FTS_NUM_AUX_INDEX;
j++) {
- os_thread_join(merge_info[j]
- .thread_hdl);
+ os_thread_join(merge_info[j]
+ .thread_hdl);
}
}
} else {
diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc
index c81b10b93f1..3ddb8654f69 100644
--- a/storage/xtradb/row/row0mysql.cc
+++ b/storage/xtradb/row/row0mysql.cc
@@ -1373,6 +1373,8 @@ run_again:
row_ins_step(thr);
+ DEBUG_SYNC_C("ib_after_row_insert_step");
+
err = trx->error_state;
if (err != DB_SUCCESS) {
diff --git a/storage/xtradb/row/row0purge.cc b/storage/xtradb/row/row0purge.cc
index bc2e0b0e1cb..35b3520749b 100644
--- a/storage/xtradb/row/row0purge.cc
+++ b/storage/xtradb/row/row0purge.cc
@@ -897,7 +897,7 @@ row_purge_record_func(
Fetches an undo log record and does the purge for the recorded operation.
If none left, or the current purge completed, returns the control to the
parent node, which is always a query thread node. */
-static MY_ATTRIBUTE((nonnull))
+static
void
row_purge(
/*======*/
diff --git a/storage/xtradb/row/row0upd.cc b/storage/xtradb/row/row0upd.cc
index 6288251da77..a203872e3bb 100644
--- a/storage/xtradb/row/row0upd.cc
+++ b/storage/xtradb/row/row0upd.cc
@@ -1285,8 +1285,6 @@ row_upd_index_replace_new_col_vals_index_pos(
ulint n_fields;
const ulint zip_size = dict_table_zip_size(index->table);
- ut_ad(index);
-
dtuple_set_info_bits(entry, update->info_bits);
if (order_only) {
@@ -1471,8 +1469,6 @@ row_upd_changes_ord_field_binary_func(
ulint i;
const dict_index_t* clust_index;
- ut_ad(index);
- ut_ad(update);
ut_ad(thr);
ut_ad(thr->graph);
ut_ad(thr->graph->trx);
diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc
index 3865332efbe..5389b7e9bc4 100644
--- a/storage/xtradb/srv/srv0srv.cc
+++ b/storage/xtradb/srv/srv0srv.cc
@@ -79,12 +79,6 @@ Created 10/8/1995 Heikki Tuuri
#include <my_rdtsc.h>
#include "btr0scrub.h"
-/* prototypes of new functions added to ha_innodb.cc for kill_idle_transaction */
-ibool innobase_thd_is_idle(const void* thd);
-ib_int64_t innobase_thd_get_start_time(const void* thd);
-void innobase_thd_kill(ulong thd_id);
-ulong innobase_thd_get_thread_id(const void* thd);
-
/* prototypes for new functions added to ha_innodb.cc */
ibool innobase_get_slow_log();
@@ -506,6 +500,7 @@ this many index pages, there are 2 ways to calculate statistics:
table/index are not found in the innodb database */
UNIV_INTERN unsigned long long srv_stats_transient_sample_pages = 8;
UNIV_INTERN my_bool srv_stats_persistent = TRUE;
+UNIV_INTERN my_bool srv_stats_include_delete_marked = FALSE;
UNIV_INTERN unsigned long long srv_stats_persistent_sample_pages = 20;
UNIV_INTERN my_bool srv_stats_auto_recalc = TRUE;
@@ -1480,22 +1475,26 @@ srv_printf_innodb_monitor(
low level 135. Therefore we can reserve the latter mutex here without
a danger of a deadlock of threads. */
- mutex_enter(&dict_foreign_err_mutex);
+ if (!recv_recovery_on) {
- if (!srv_read_only_mode && ftell(dict_foreign_err_file) != 0L) {
- fputs("------------------------\n"
- "LATEST FOREIGN KEY ERROR\n"
- "------------------------\n", file);
- ut_copy_file(file, dict_foreign_err_file);
- }
+ mutex_enter(&dict_foreign_err_mutex);
+
+ if (!srv_read_only_mode
+ && ftell(dict_foreign_err_file) != 0L) {
+ fputs("------------------------\n"
+ "LATEST FOREIGN KEY ERROR\n"
+ "------------------------\n", file);
+ ut_copy_file(file, dict_foreign_err_file);
+ }
- mutex_exit(&dict_foreign_err_mutex);
+ mutex_exit(&dict_foreign_err_mutex);
+ }
/* Only if lock_print_info_summary proceeds correctly,
before we call the lock_print_info_all_transactions
to print all the lock information. IMPORTANT NOTE: This
function acquires the lock mutex on success. */
- ret = lock_print_info_summary(file, nowait);
+ ret = recv_recovery_on ? FALSE : lock_print_info_summary(file, nowait);
if (ret) {
if (trx_start_pos) {
@@ -1528,10 +1527,13 @@ srv_printf_innodb_monitor(
"--------\n", file);
os_aio_print(file);
- fputs("-------------------------------------\n"
- "INSERT BUFFER AND ADAPTIVE HASH INDEX\n"
- "-------------------------------------\n", file);
- ibuf_print(file);
+ if (!recv_recovery_on) {
+
+ fputs("-------------------------------------\n"
+ "INSERT BUFFER AND ADAPTIVE HASH INDEX\n"
+ "-------------------------------------\n", file);
+ ibuf_print(file);
+ }
fprintf(file,
@@ -1543,10 +1545,13 @@ srv_printf_innodb_monitor(
btr_cur_n_sea_old = btr_cur_n_sea;
btr_cur_n_non_sea_old = btr_cur_n_non_sea;
- fputs("---\n"
- "LOG\n"
- "---\n", file);
- log_print(file);
+ if (!recv_recovery_on) {
+
+ fputs("---\n"
+ "LOG\n"
+ "---\n", file);
+ log_print(file);
+ }
fputs("----------------------\n"
"BUFFER POOL AND MEMORY\n"
@@ -1641,8 +1646,9 @@ srv_printf_innodb_monitor(
? (recv_sys->addr_hash->n_cells * sizeof(hash_cell_t)) : 0),
recv_sys_subtotal);
+
fprintf(file, "Dictionary memory allocated " ULINTPF "\n",
- dict_sys->size);
+ dict_sys ? dict_sys->size : 0);
buf_print_io(file);
@@ -1750,6 +1756,10 @@ srv_printf_innodb_monitor(
mutex_exit(&srv_innodb_monitor_mutex);
fflush(file);
+#ifndef DBUG_OFF
+ srv_debug_monitor_printed = true;
+#endif
+
return(ret);
}
@@ -2118,6 +2128,12 @@ srv_export_innodb_status(void)
mutex_exit(&srv_innodb_monitor_mutex);
}
+#ifndef DBUG_OFF
+/** false before InnoDB monitor has been printed at least once, true
+afterwards */
+bool srv_debug_monitor_printed = false;
+#endif
+
/*********************************************************************//**
A thread which prints the info output by various InnoDB monitors.
@return a dummy parameter */
@@ -2391,36 +2407,6 @@ loop:
old_sema = sema;
}
- if (srv_kill_idle_transaction && trx_sys) {
- trx_t* trx;
- time_t now;
-rescan_idle:
- now = time(NULL);
- mutex_enter(&trx_sys->mutex);
- trx = UT_LIST_GET_FIRST(trx_sys->mysql_trx_list);
- while (trx) {
- if (trx->state == TRX_STATE_ACTIVE
- && trx->mysql_thd
- && innobase_thd_is_idle(trx->mysql_thd)) {
- ib_int64_t start_time = innobase_thd_get_start_time(trx->mysql_thd);
- ulong thd_id = innobase_thd_get_thread_id(trx->mysql_thd);
-
- if (trx->last_stmt_start != start_time) {
- trx->idle_start = now;
- trx->last_stmt_start = start_time;
- } else if (difftime(now, trx->idle_start)
- > srv_kill_idle_transaction) {
- /* kill the session */
- mutex_exit(&trx_sys->mutex);
- innobase_thd_kill(thd_id);
- goto rescan_idle;
- }
- }
- trx = UT_LIST_GET_NEXT(mysql_trx_list, trx);
- }
- mutex_exit(&trx_sys->mutex);
- }
-
/* Flush stderr so that a database user gets the output
to possible MySQL error file */
@@ -2542,10 +2528,8 @@ DECLARE_THREAD(srv_redo_log_follow_thread)(
} while (srv_shutdown_state < SRV_SHUTDOWN_LAST_PHASE);
- srv_track_changed_pages = FALSE;
log_online_read_shutdown();
os_event_set(srv_redo_log_tracked_event);
- srv_redo_log_thread_started = false; /* Defensive, not required */
my_thread_end();
os_thread_exit(NULL);
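
The srv0srv.cc and srv0srv.h hunks introduce srv_debug_monitor_printed as an extern bool under !DBUG_OFF and as a #define false otherwise, so call sites can test it without their own conditional compilation. A hypothetical C++ sketch of that debug-only-flag pattern, with NDEBUG standing in for DBUG_OFF:

// debug_flag_sketch.cc -- a flag that exists only in debug builds.
// Illustrative only; NDEBUG stands in for DBUG_OFF.
#include <cstdio>

#ifndef NDEBUG
// false before the monitor has printed at least once, true afterwards.
static bool monitor_printed = false;
#else
// In release builds the name collapses to a constant, so call sites need
// no #ifdef of their own.
#define monitor_printed false
#endif

static void print_monitor()
{
#ifndef NDEBUG
    monitor_printed = true;
#endif
    std::printf("monitor output\n");
}

int main()
{
    if (!monitor_printed) {
        print_monitor();
    }
    return 0;
}
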
diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc
index 679913959c9..5b6ca38951e 100644
--- a/storage/xtradb/srv/srv0start.cc
+++ b/storage/xtradb/srv/srv0start.cc
@@ -2120,6 +2120,7 @@ innobase_start_or_create_for_mysql(void)
fsp_init();
log_init();
+ log_online_init();
lock_sys_create(srv_lock_table_size);
@@ -2513,6 +2514,23 @@ files_checked:
and there must be no page in the buf_flush list. */
buf_pool_invalidate();
+ /* Start monitor thread early enough so that e.g. crash
+ recovery failing to find free pages in the buffer pool is
+ diagnosed. */
+ if (!srv_read_only_mode)
+ {
+ /* Create the thread which prints InnoDB monitor
+ info */
+ srv_monitor_active = true;
+ thread_handles[4 + SRV_MAX_N_IO_THREADS] =
+ os_thread_create(
+ srv_monitor_thread,
+ NULL,
+ thread_ids + 4 + SRV_MAX_N_IO_THREADS);
+
+ thread_started[4 + SRV_MAX_N_IO_THREADS] = true;
+ }
+
/* We always try to do a recovery, even if the database had
been shut down normally: this is the normal startup path */
@@ -2533,7 +2551,7 @@ files_checked:
return(err);
}
- /* This must precede recv_apply_hashed_log_recs(TRUE). */
+ /* This must precede recv_apply_hashed_log_recs(true). */
ib_bh = trx_sys_init_at_db_start();
if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
@@ -2541,12 +2559,8 @@ files_checked:
respective file pages, for the last batch of
recv_group_scan_log_recs(). */
- err = recv_apply_hashed_log_recs(TRUE);
+ recv_apply_hashed_log_recs(true);
DBUG_PRINT("ib_log", ("apply completed"));
-
- if (err != DB_SUCCESS) {
- return(err);
- }
}
if (!srv_read_only_mode) {
@@ -2860,11 +2874,14 @@ files_checked:
thread_started[3 + SRV_MAX_N_IO_THREADS] = true;
/* Create the thread which prints InnoDB monitor info */
- srv_monitor_active = true;
- thread_handles[4 + SRV_MAX_N_IO_THREADS] = os_thread_create(
- srv_monitor_thread,
- NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS);
- thread_started[4 + SRV_MAX_N_IO_THREADS] = true;
+ if (!thread_started[4 + SRV_MAX_N_IO_THREADS]) {
+ /* srv_monitor_thread not yet started */
+ srv_monitor_active = true;
+ thread_handles[4 + SRV_MAX_N_IO_THREADS] = os_thread_create(
+ srv_monitor_thread,
+ NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS);
+ thread_started[4 + SRV_MAX_N_IO_THREADS] = true;
+ }
}
/* Create the SYS_FOREIGN and SYS_FOREIGN_COLS system tables */
@@ -3235,6 +3252,7 @@ innobase_shutdown_for_mysql(void)
btr_search_disable();
ibuf_close();
+ log_online_shutdown();
log_shutdown();
trx_sys_file_format_close();
trx_sys_close();
diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc
index f3516958d89..6692eef9fb0 100644
--- a/storage/xtradb/sync/sync0sync.cc
+++ b/storage/xtradb/sync/sync0sync.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
diff --git a/storage/xtradb/trx/trx0sys.cc b/storage/xtradb/trx/trx0sys.cc
index 45315d35c42..1b4d4f00074 100644
--- a/storage/xtradb/trx/trx0sys.cc
+++ b/storage/xtradb/trx/trx0sys.cc
@@ -1383,6 +1383,33 @@ trx_sys_close(void)
trx_sys = NULL;
}
+/** @brief Convert an undo log to TRX_UNDO_PREPARED state on shutdown.
+
+If any prepared ACTIVE transactions exist, and their rollback was
+prevented by innodb_force_recovery, we convert these transactions to
+XA PREPARE state in the main-memory data structures, so that shutdown
+will proceed normally. These transactions will again recover as ACTIVE
+on the next restart, and they will be rolled back unless
+innodb_force_recovery prevents it again.
+
+@param[in] trx transaction
+@param[in,out] undo undo log to convert to TRX_UNDO_PREPARED */
+static
+void
+trx_undo_fake_prepared(
+ const trx_t* trx,
+ trx_undo_t* undo)
+{
+ ut_ad(srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
+ ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE));
+ ut_ad(trx->is_recovered);
+
+ if (undo != NULL) {
+ ut_ad(undo->state == TRX_UNDO_ACTIVE);
+ undo->state = TRX_UNDO_PREPARED;
+ }
+}
+
/*********************************************************************
Check if there are any active (non-prepared) transactions.
@return total number of active transactions or 0 if none */
@@ -1391,15 +1418,42 @@ ulint
trx_sys_any_active_transactions(void)
/*=================================*/
{
- ulint total_trx = 0;
-
mutex_enter(&trx_sys->mutex);
- total_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list)
- + UT_LIST_GET_LEN(trx_sys->mysql_trx_list);
+ ulint total_trx = UT_LIST_GET_LEN(trx_sys->mysql_trx_list);
+
+ if (total_trx == 0) {
+ total_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list);
+ ut_a(total_trx >= trx_sys->n_prepared_trx);
+
+ if (total_trx > trx_sys->n_prepared_trx
+ && srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO) {
+ for (trx_t* trx = UT_LIST_GET_FIRST(
+ trx_sys->rw_trx_list);
+ trx != NULL;
+ trx = UT_LIST_GET_NEXT(trx_list, trx)) {
+ if (!trx_state_eq(trx, TRX_STATE_ACTIVE)
+ || !trx->is_recovered) {
+ continue;
+ }
+ /* This was a recovered transaction
+ whose rollback was disabled by
+ the innodb_force_recovery setting.
+ Pretend that it is in XA PREPARE
+ state so that shutdown will work. */
+ trx_undo_fake_prepared(
+ trx, trx->insert_undo);
+ trx_undo_fake_prepared(
+ trx, trx->update_undo);
+ trx->state = TRX_STATE_PREPARED;
+ trx_sys->n_prepared_trx++;
+ trx_sys->n_prepared_recovered_trx++;
+ }
+ }
- ut_a(total_trx >= trx_sys->n_prepared_trx);
- total_trx -= trx_sys->n_prepared_trx;
+ ut_a(total_trx >= trx_sys->n_prepared_trx);
+ total_trx -= trx_sys->n_prepared_trx;
+ }
mutex_exit(&trx_sys->mutex);