-rw-r--r--  .bzrignore | 1
-rw-r--r--  client/mysqldump.c | 24
-rw-r--r--  include/my_sys.h | 2
-rw-r--r--  mysql-test/r/bdb.result | 34
-rw-r--r--  mysql-test/r/create.result | 2
-rw-r--r--  mysql-test/r/func_time.result | 2
-rw-r--r--  mysql-test/r/func_timestamp.result | 2
-rw-r--r--  mysql-test/r/lock_multi.result | 15
-rw-r--r--  mysql-test/r/mysqldump.result | 28
-rw-r--r--  mysql-test/r/ps.result | 105
-rw-r--r--  mysql-test/r/sp.result | 46
-rw-r--r--  mysql-test/r/type_timestamp.result | 2
-rw-r--r--  mysql-test/r/udf.result | 18
-rw-r--r--  mysql-test/t/bdb.test | 35
-rw-r--r--  mysql-test/t/create.test | 2
-rw-r--r--  mysql-test/t/func_time.test | 7
-rw-r--r--  mysql-test/t/func_timestamp.test | 6
-rw-r--r--  mysql-test/t/lock_multi.test | 50
-rw-r--r--  mysql-test/t/mysqldump.test | 33
-rw-r--r--  mysql-test/t/ps.test | 118
-rw-r--r--  mysql-test/t/sp.test | 46
-rw-r--r--  mysql-test/t/type_timestamp.test | 6
-rw-r--r--  mysql-test/t/udf.test | 12
-rw-r--r--  mysys/mf_dirname.c | 4
-rw-r--r--  mysys/my_lib.c | 2
-rw-r--r--  mysys/my_malloc.c | 2
-rw-r--r--  mysys/safemalloc.c | 2
-rw-r--r--  mysys/thr_lock.c | 2
-rw-r--r--  ndb/include/kernel/GlobalSignalNumbers.h | 6
-rw-r--r--  ndb/include/kernel/signaldata/BackupContinueB.hpp | 3
-rw-r--r--  ndb/include/kernel/signaldata/BackupImpl.hpp | 22
-rw-r--r--  ndb/include/kernel/signaldata/BackupSignalData.hpp | 8
-rw-r--r--  ndb/include/kernel/signaldata/DictTabInfo.hpp | 11
-rw-r--r--  ndb/include/kernel/signaldata/LqhFrag.hpp | 25
-rw-r--r--  ndb/include/kernel/signaldata/TupFrag.hpp | 15
-rw-r--r--  ndb/include/ndbapi/NdbDictionary.hpp | 14
-rw-r--r--  ndb/src/common/debugger/signaldata/BackupImpl.cpp | 6
-rw-r--r--  ndb/src/common/debugger/signaldata/BackupSignalData.cpp | 6
-rw-r--r--  ndb/src/common/debugger/signaldata/DictTabInfo.cpp | 8
-rw-r--r--  ndb/src/common/debugger/signaldata/LqhFrag.cpp | 6
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.cpp | 163
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.hpp | 15
-rw-r--r--  ndb/src/kernel/blocks/backup/BackupFormat.hpp | 17
-rw-r--r--  ndb/src/kernel/blocks/backup/BackupInit.cpp | 3
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 32
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 4
-rw-r--r--  ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 10
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 94
-rw-r--r--  ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 5
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 79
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp | 6
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp | 10
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.hpp | 4
-rw-r--r--  ndb/src/ndbapi/NdbDictionary.cpp | 24
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.cpp | 20
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.hpp | 3
-rw-r--r--  ndb/tools/restore/Restore.cpp | 58
-rw-r--r--  ndb/tools/restore/Restore.hpp | 15
-rw-r--r--  ndb/tools/restore/consumer_restore.cpp | 10
-rw-r--r--  scripts/Makefile.am | 6
-rw-r--r--  scripts/make_binary_distribution.sh | 20
-rw-r--r--  scripts/mysql_upgrade_shell.sh (renamed from scripts/mysql_upgrade.sh) | 0
-rw-r--r--  server-tools/instance-manager/instance_options.cc | 9
-rw-r--r--  sql/ha_federated.cc | 3
-rw-r--r--  sql/ha_ndbcluster.cc | 9
-rw-r--r--  sql/item_strfunc.cc | 4
-rw-r--r--  sql/lock.cc | 44
-rw-r--r--  sql/log.cc | 22
-rw-r--r--  sql/log_event.cc | 51
-rw-r--r--  sql/log_event.h | 21
-rw-r--r--  sql/mysql_priv.h | 1
-rw-r--r--  sql/set_var.cc | 2
-rw-r--r--  sql/set_var.h | 2
-rw-r--r--  sql/slave.cc | 18
-rw-r--r--  sql/slave.h | 6
-rw-r--r--  sql/sp.cc | 107
-rw-r--r--  sql/sp.h | 14
-rw-r--r--  sql/sp_head.cc | 70
-rw-r--r--  sql/sp_head.h | 10
-rw-r--r--  sql/sql_base.cc | 14
-rw-r--r--  sql/sql_class.h | 45
-rw-r--r--  sql/sql_db.cc | 11
-rw-r--r--  sql/sql_handler.cc | 11
-rw-r--r--  sql/sql_insert.cc | 41
-rw-r--r--  sql/sql_lex.h | 5
-rw-r--r--  sql/sql_parse.cc | 312
-rw-r--r--  sql/sql_table.cc | 29
-rw-r--r--  sql/sql_trigger.cc | 9
-rw-r--r--  sql/sql_udf.cc | 6
-rw-r--r--  sql/sql_udf.h | 2
-rw-r--r--  sql/sql_view.cc | 9
-rw-r--r--  sql/sql_yacc.yy | 47
-rw-r--r--  sql/table.cc | 8
-rw-r--r--  sql/table.h | 3
-rw-r--r--  sql/tztime.cc | 6
-rw-r--r--  support-files/mysql.spec.sh | 28
96 files changed, 1764 insertions(+), 551 deletions(-)
diff --git a/.bzrignore b/.bzrignore
index eafc384fda5..ef02a085144 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -1285,3 +1285,4 @@ vio/viotest-sslconnect.cpp
vio/viotest.cpp
zlib/*.ds?
zlib/*.vcproj
+BitKeeper/etc/RESYNC_TREE
diff --git a/client/mysqldump.c b/client/mysqldump.c
index e89e8064c9a..53cb06be6f3 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -1503,9 +1503,15 @@ static uint get_table_structure(char *table, char *db, char *table_type,
field= mysql_fetch_field_direct(result, 0);
if (strcmp(field->name, "View") == 0)
{
+ char *scv_buff = NULL;
+
if (verbose)
fprintf(stderr, "-- It's a view, create dummy table for view\n");
+ /* save "show create" statement for later */
+ if ((row= mysql_fetch_row(result)) && (scv_buff=row[1]))
+ scv_buff= my_strdup(scv_buff, MYF(0));
+
mysql_free_result(result);
/*
@@ -1523,9 +1529,22 @@ static uint get_table_structure(char *table, char *db, char *table_type,
"SHOW FIELDS FROM %s", result_table);
if (mysql_query_with_error_report(sock, 0, query_buff))
{
+ /*
+ View references invalid or privileged table/col/fun (err 1356),
+ so we cannot create a stand-in table. Be defensive and dump
+ a comment with the view's 'show create' statement. (Bug #17371)
+ */
+
+ if (mysql_errno(sock) == ER_VIEW_INVALID)
+ fprintf(sql_file, "\n-- failed on view %s: %s\n\n", result_table, scv_buff ? scv_buff : "");
+
+ my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR));
+
safe_exit(EX_MYSQLERR);
- DBUG_RETURN(0);
+ DBUG_RETURN(0);
}
+ else
+ my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR));
if ((result= mysql_store_result(sock)))
{
@@ -1566,6 +1585,9 @@ static uint get_table_structure(char *table, char *db, char *table_type,
}
mysql_free_result(result);
+ if (path)
+ my_fclose(sql_file, MYF(MY_WME));
+
seen_views= 1;
DBUG_RETURN(0);
}
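The mysqldump.c hunks above all follow one ownership rule: the "SHOW CREATE" row is owned by the result set, so it is duplicated with my_strdup() before mysql_free_result(), emitted as a comment if the later SHOW FIELDS fails with ER_VIEW_INVALID, and released with my_free(..., MYF(MY_ALLOW_ZERO_PTR)) on every exit path. A minimal stand-alone sketch of that rule (helper name is illustrative, not part of the patch):

    static char *save_show_create(MYSQL_RES *result)
    {
      MYSQL_ROW row;
      char *copy= NULL;                    /* stays NULL if no row was returned */

      if ((row= mysql_fetch_row(result)) && row[1])
        copy= my_strdup(row[1], MYF(0));   /* copy survives mysql_free_result() */

      mysql_free_result(result);           /* row[1] is invalid from here on */
      return copy;                         /* free with my_free(copy, MYF(MY_ALLOW_ZERO_PTR)) */
    }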
diff --git a/include/my_sys.h b/include/my_sys.h
index 229389f1ac5..cbd7c79fa11 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -157,7 +157,7 @@ extern gptr my_realloc(gptr oldpoint,uint Size,myf MyFlags);
extern void my_no_flags_free(gptr ptr);
extern gptr my_memdup(const byte *from,uint length,myf MyFlags);
extern char *my_strdup(const char *from,myf MyFlags);
-extern char *my_strdup_with_length(const byte *from, uint length,
+extern char *my_strdup_with_length(const char *from, uint length,
myf MyFlags);
/* we do use FG (as a no-op) in below so that a typo on FG is caught */
#define my_free(PTR,FG) ((void)FG,my_no_flags_free(PTR))
diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result
index af6319afe99..ee7cdceefda 100644
--- a/mysql-test/r/bdb.result
+++ b/mysql-test/r/bdb.result
@@ -1928,4 +1928,38 @@ create table t1 (a int) engine=bdb;
commit;
alter table t1 add primary key(a);
drop table t1;
+set autocommit=1;
+reset master;
+create table bug16206 (a int);
+insert into bug16206 values(1);
+start transaction;
+insert into bug16206 values(2);
+commit;
+show binlog events;
+Log_name Pos Event_type Server_id End_log_pos Info
+f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
+f n Query 1 n use `test`; create table bug16206 (a int)
+f n Query 1 n use `test`; insert into bug16206 values(1)
+f n Query 1 n use `test`; insert into bug16206 values(2)
+drop table bug16206;
+reset master;
+create table bug16206 (a int) engine= bdb;
+insert into bug16206 values(0);
+insert into bug16206 values(1);
+start transaction;
+insert into bug16206 values(2);
+commit;
+insert into bug16206 values(3);
+show binlog events;
+Log_name Pos Event_type Server_id End_log_pos Info
+f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
+f n Query 1 n use `test`; create table bug16206 (a int) engine= bdb
+f n Query 1 n use `test`; insert into bug16206 values(0)
+f n Query 1 n use `test`; insert into bug16206 values(1)
+f n Query 1 n use `test`; BEGIN
+f n Query 1 n use `test`; insert into bug16206 values(2)
+f n Query 1 n use `test`; COMMIT
+f n Query 1 n use `test`; insert into bug16206 values(3)
+drop table bug16206;
+set autocommit=0;
End of 5.0 tests
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index 27a6c8a9d03..c5b77ea4925 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -607,7 +607,7 @@ create database mysqltest;
use mysqltest;
drop database mysqltest;
create table test.t1 like x;
-ERROR 42000: Incorrect database name 'NULL'
+ERROR 3D000: No database selected
drop table if exists test.t1;
create database mysqltest;
use mysqltest;
diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result
index 593ce7b26c8..aaa86378626 100644
--- a/mysql-test/r/func_time.result
+++ b/mysql-test/r/func_time.result
@@ -1,4 +1,5 @@
drop table if exists t1,t2,t3;
+set time_zone="+03:00";
select from_days(to_days("960101")),to_days(960201)-to_days("19960101"),to_days(date_add(curdate(), interval 1 day))-to_days(curdate()),weekday("1997-11-29");
from_days(to_days("960101")) to_days(960201)-to_days("19960101") to_days(date_add(curdate(), interval 1 day))-to_days(curdate()) weekday("1997-11-29")
1996-01-01 31 1 5
@@ -945,3 +946,4 @@ id day id day
1 2005-06-01 3 2005-07-15
3 2005-07-01 3 2005-07-15
DROP TABLE t1,t2;
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/r/func_timestamp.result b/mysql-test/r/func_timestamp.result
index d9912f08b72..495fedea9e6 100644
--- a/mysql-test/r/func_timestamp.result
+++ b/mysql-test/r/func_timestamp.result
@@ -1,4 +1,5 @@
drop table if exists t1;
+set time_zone="+03:00";
create table t1 (Zeit time, Tag tinyint not null, Monat tinyint not null,
Jahr smallint not null, index(Tag), index(Monat), index(Jahr) );
insert into t1 values ("09:26:00",16,9,1998),("09:26:00",16,9,1998);
@@ -9,3 +10,4 @@ Date Unix
1998-9-16 09:26:00 905927160
1998-9-16 09:26:00 905927160
drop table t1;
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result
index 2188d58e526..c80108f723a 100644
--- a/mysql-test/r/lock_multi.result
+++ b/mysql-test/r/lock_multi.result
@@ -67,6 +67,21 @@ Select_priv
N
use test;
use test;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+ FLUSH TABLES WITH READ LOCK;
+CREATE TABLE t2 (c1 int);
+UNLOCK TABLES;
+UNLOCK TABLES;
+DROP TABLE t1, t2;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+ FLUSH TABLES WITH READ LOCK;
+CREATE TABLE t2 AS SELECT * FROM t1;
+ERROR HY000: Table 't2' was not locked with LOCK TABLES
+UNLOCK TABLES;
+UNLOCK TABLES;
+DROP TABLE t1;
create table t1 (f1 int(12) unsigned not null auto_increment, primary key(f1)) engine=innodb;
lock tables t1 write;
alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; //
diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result
index e496e1ef35d..f9714e067e6 100644
--- a/mysql-test/r/mysqldump.result
+++ b/mysql-test/r/mysqldump.result
@@ -2743,6 +2743,25 @@ end AFTER # root@localhost
drop trigger tr1;
drop trigger tr2;
drop table t1, t2;
+create table t (qty int, price int);
+insert into t values(3, 50);
+insert into t values(5, 51);
+create view v1 as select qty, price, qty*price as value from t;
+create view v2 as select qty from v1;
+mysqldump {
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v1` AS select `t`.`qty` AS `qty`,`t`.`price` AS `price`,(`t`.`qty` * `t`.`price`) AS `value` from `t` */;
+
+} mysqldump {
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */
+/*!50001 VIEW `v2` AS select `v1`.`qty` AS `qty` from `v1` */;
+
+} mysqldump
+drop view v1;
+drop view v2;
+drop table t;
/*!50003 CREATE FUNCTION `f`() RETURNS bigint(20)
return 42 */|
/*!50003 CREATE PROCEDURE `p`()
@@ -2757,6 +2776,15 @@ p CREATE DEFINER=`root`@`localhost` PROCEDURE `p`()
select 42
drop function f;
drop procedure p;
+create table t1 ( id serial );
+create view v1 as select * from t1;
+drop table t1;
+mysqldump {
+
+-- failed on view `v1`: CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`t1`.`id` AS `id` from `t1`
+
+} mysqldump
+drop view v1;
create database mysqldump_test_db;
use mysqldump_test_db;
create table t1 (id int);
diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result
index abebfc8cd93..3ce2f5169e2 100644
--- a/mysql-test/r/ps.result
+++ b/mysql-test/r/ps.result
@@ -1158,3 +1158,108 @@ Warnings:
Error 1146 Table 'test.t4' doesn't exist
deallocate prepare stmt;
drop table t1, t2, t3;
+create database mysqltest_long_database_name_to_thrash_heap;
+use test;
+create table t1 (i int);
+prepare stmt from "alter table test.t1 rename t1";
+use mysqltest_long_database_name_to_thrash_heap;
+execute stmt;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+prepare stmt from "alter table test.t1 rename t1";
+use test;
+execute stmt;
+show tables like 't1';
+Tables_in_test (t1)
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+t1
+deallocate prepare stmt;
+use mysqltest_long_database_name_to_thrash_heap;
+prepare stmt_create from "create table t1 (i int)";
+prepare stmt_insert from "insert into t1 (i) values (1)";
+prepare stmt_update from "update t1 set i=2";
+prepare stmt_delete from "delete from t1 where i=2";
+prepare stmt_select from "select * from t1";
+prepare stmt_alter from "alter table t1 add column (b int)";
+prepare stmt_alter1 from "alter table t1 drop column b";
+prepare stmt_analyze from "analyze table t1";
+prepare stmt_optimize from "optimize table t1";
+prepare stmt_show from "show tables like 't1'";
+prepare stmt_truncate from "truncate table t1";
+prepare stmt_drop from "drop table t1";
+drop table t1;
+use test;
+execute stmt_create;
+show tables like 't1';
+Tables_in_test (t1)
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+t1
+use test;
+execute stmt_insert;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+i
+1
+execute stmt_update;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+i
+2
+execute stmt_delete;
+execute stmt_select;
+i
+execute stmt_alter;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+Field Type Null Key Default Extra
+i int(11) YES NULL
+b int(11) YES NULL
+execute stmt_alter1;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+Field Type Null Key Default Extra
+i int(11) YES NULL
+execute stmt_analyze;
+Table Op Msg_type Msg_text
+mysqltest_long_database_name_to_thrash_heap.t1 analyze status Table is already up to date
+execute stmt_optimize;
+Table Op Msg_type Msg_text
+mysqltest_long_database_name_to_thrash_heap.t1 optimize status Table is already up to date
+execute stmt_show;
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+t1
+execute stmt_truncate;
+execute stmt_drop;
+show tables like 't1';
+Tables_in_test (t1)
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+Tables_in_mysqltest_long_database_name_to_thrash_heap (t1)
+drop database mysqltest_long_database_name_to_thrash_heap;
+prepare stmt_create from "create table t1 (i int)";
+ERROR 3D000: No database selected
+prepare stmt_insert from "insert into t1 (i) values (1)";
+ERROR 3D000: No database selected
+prepare stmt_update from "update t1 set i=2";
+ERROR 3D000: No database selected
+prepare stmt_delete from "delete from t1 where i=2";
+ERROR 3D000: No database selected
+prepare stmt_select from "select * from t1";
+ERROR 3D000: No database selected
+prepare stmt_alter from "alter table t1 add column (b int)";
+ERROR 3D000: No database selected
+prepare stmt_alter1 from "alter table t1 drop column b";
+ERROR 3D000: No database selected
+prepare stmt_analyze from "analyze table t1";
+ERROR 3D000: No database selected
+prepare stmt_optimize from "optimize table t1";
+ERROR 3D000: No database selected
+prepare stmt_show from "show tables like 't1'";
+ERROR 3D000: No database selected
+prepare stmt_truncate from "truncate table t1";
+ERROR 3D000: No database selected
+prepare stmt_drop from "drop table t1";
+ERROR 3D000: No database selected
+create temporary table t1 (i int);
+ERROR 3D000: No database selected
+use test;
diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result
index d3874c769fa..96bf2f01f86 100644
--- a/mysql-test/r/sp.result
+++ b/mysql-test/r/sp.result
@@ -4990,6 +4990,52 @@ CALL bug18037_p2()|
DROP FUNCTION bug18037_f1|
DROP PROCEDURE bug18037_p1|
DROP PROCEDURE bug18037_p2|
+use test|
+create table t3 (i int)|
+insert into t3 values (1), (2)|
+create database mysqltest1|
+use mysqltest1|
+create function bug17199() returns varchar(2) deterministic return 'ok'|
+use test|
+select *, mysqltest1.bug17199() from t3|
+i mysqltest1.bug17199()
+1 ok
+2 ok
+use mysqltest1|
+create function bug18444(i int) returns int no sql deterministic return i + 1|
+use test|
+select mysqltest1.bug18444(i) from t3|
+mysqltest1.bug18444(i)
+2
+3
+drop database mysqltest1|
+create database mysqltest1 charset=utf8|
+create database mysqltest2 charset=utf8|
+create procedure mysqltest1.p1()
+begin
+-- alters the default collation of database test
+alter database character set koi8r;
+end|
+use mysqltest1|
+call p1()|
+show create database mysqltest1|
+Database Create Database
+mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */
+show create database mysqltest2|
+Database Create Database
+mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */
+alter database mysqltest1 character set utf8|
+use mysqltest2|
+call mysqltest1.p1()|
+show create database mysqltest1|
+Database Create Database
+mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */
+show create database mysqltest2|
+Database Create Database
+mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */
+drop database mysqltest1|
+drop database mysqltest2|
+use test|
drop table if exists t3|
drop procedure if exists bug15217|
create table t3 as select 1|
diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result
index 61ed6bbabf3..0817cc3b6c7 100644
--- a/mysql-test/r/type_timestamp.result
+++ b/mysql-test/r/type_timestamp.result
@@ -1,4 +1,5 @@
drop table if exists t1,t2;
+set time_zone="+03:00";
CREATE TABLE t1 (a int, t timestamp);
CREATE TABLE t2 (a int, t datetime);
SET TIMESTAMP=1234;
@@ -491,3 +492,4 @@ a b c
5 NULL 2001-09-09 04:46:59
6 NULL 2006-06-06 06:06:06
drop table t1;
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/r/udf.result b/mysql-test/r/udf.result
index be52fd7f87c..484c42c41bf 100644
--- a/mysql-test/r/udf.result
+++ b/mysql-test/r/udf.result
@@ -76,6 +76,24 @@ call XXX2();
metaphon(testval)
HL
drop procedure xxx2;
+CREATE TABLE bug19904(n INT, v varchar(10));
+INSERT INTO bug19904 VALUES (1,'one'),(2,'two'),(NULL,NULL),(3,'three'),(4,'four');
+SELECT myfunc_double(n) AS f FROM bug19904;
+f
+49.00
+50.00
+NULL
+51.00
+52.00
+SELECT metaphon(v) AS f FROM bug19904;
+f
+ON
+TW
+NULL
+0R
+FR
+DROP TABLE bug19904;
+End of 5.0 tests.
DROP FUNCTION metaphon;
DROP FUNCTION myfunc_double;
DROP FUNCTION myfunc_nonexist;
diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test
index d017d91bfb1..ec05eeb3c34 100644
--- a/mysql-test/t/bdb.test
+++ b/mysql-test/t/bdb.test
@@ -1019,4 +1019,39 @@ commit;
alter table t1 add primary key(a);
drop table t1;
+
+#
+# Bug #16206: Superfluous COMMIT event in binlog when updating BDB in autocommit mode
+#
+set autocommit=1;
+
+let $VERSION=`select version()`;
+
+reset master;
+create table bug16206 (a int);
+insert into bug16206 values(1);
+start transaction;
+insert into bug16206 values(2);
+commit;
+--replace_result $VERSION VERSION
+--replace_column 1 f 2 n 5 n
+show binlog events;
+drop table bug16206;
+
+reset master;
+create table bug16206 (a int) engine= bdb;
+insert into bug16206 values(0);
+insert into bug16206 values(1);
+start transaction;
+insert into bug16206 values(2);
+commit;
+insert into bug16206 values(3);
+--replace_result $VERSION VERSION
+--replace_column 1 f 2 n 5 n
+show binlog events;
+drop table bug16206;
+
+set autocommit=0;
+
+
--echo End of 5.0 tests
diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test
index e22c2b5c426..07edbf206fe 100644
--- a/mysql-test/t/create.test
+++ b/mysql-test/t/create.test
@@ -517,7 +517,7 @@ DROP TABLE t12913;
create database mysqltest;
use mysqltest;
drop database mysqltest;
---error 1102
+--error ER_NO_DB_ERROR
create table test.t1 like x;
--disable_warnings
drop table if exists test.t1;
diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test
index d817d016881..05c033f2b22 100644
--- a/mysql-test/t/func_time.test
+++ b/mysql-test/t/func_time.test
@@ -5,6 +5,9 @@
drop table if exists t1,t2,t3;
--enable_warnings
+# Set timezone to GMT-3, to make it possible to use "interval 3 hour"
+set time_zone="+03:00";
+
select from_days(to_days("960101")),to_days(960201)-to_days("19960101"),to_days(date_add(curdate(), interval 1 day))-to_days(curdate()),weekday("1997-11-29");
select period_add("9602",-12),period_diff(199505,"9404") ;
@@ -335,6 +338,7 @@ select last_day("1997-12-1")+0.0;
# Test SAPDB UTC_% functions. This part is TZ dependant (It is supposed that
# TZ variable set to GMT-3
+
select strcmp(date_sub(localtimestamp(), interval 3 hour), utc_timestamp())=0;
select strcmp(date_format(date_sub(localtimestamp(), interval 3 hour),"%T"), utc_time())=0;
select strcmp(date_format(date_sub(localtimestamp(), interval 3 hour),"%Y-%m-%d"), utc_date())=0;
@@ -513,3 +517,6 @@ SELECT * FROM t1, t2
DROP TABLE t1,t2;
# End of 5.0 tests
+
+# Restore timezone to default
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/t/func_timestamp.test b/mysql-test/t/func_timestamp.test
index e1bb7e878ee..05a91b06d28 100644
--- a/mysql-test/t/func_timestamp.test
+++ b/mysql-test/t/func_timestamp.test
@@ -6,6 +6,9 @@
drop table if exists t1;
--enable_warnings
+# Set timezone to GMT-3, to make it possible to use "interval 3 hour"
+set time_zone="+03:00";
+
create table t1 (Zeit time, Tag tinyint not null, Monat tinyint not null,
Jahr smallint not null, index(Tag), index(Monat), index(Jahr) );
insert into t1 values ("09:26:00",16,9,1998),("09:26:00",16,9,1998);
@@ -15,3 +18,6 @@ FROM t1;
drop table t1;
# End of 4.1 tests
+
+# Restore timezone to default
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test
index 905d0699e6a..627c33b3d82 100644
--- a/mysql-test/t/lock_multi.test
+++ b/mysql-test/t/lock_multi.test
@@ -142,6 +142,7 @@ disconnect con2;
--error ER_DB_DROP_EXISTS
DROP DATABASE mysqltest_1;
+#
# Bug#16986 - Deadlock condition with MyISAM tables
#
connection locker;
@@ -170,6 +171,55 @@ connection locker;
use test;
#
connection default;
+#
+# Test if CREATE TABLE with LOCK TABLE deadlocks.
+#
+connection writer;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+#
+# This waits until t1 is unlocked.
+connection locker;
+send FLUSH TABLES WITH READ LOCK;
+--sleep 1
+#
+# This must not block.
+connection writer;
+CREATE TABLE t2 (c1 int);
+UNLOCK TABLES;
+#
+# This awakes now.
+connection locker;
+reap;
+UNLOCK TABLES;
+#
+connection default;
+DROP TABLE t1, t2;
+#
+# Test if CREATE TABLE SELECT with LOCK TABLE deadlocks.
+#
+connection writer;
+CREATE TABLE t1 (c1 int);
+LOCK TABLE t1 WRITE;
+#
+# This waits until t1 is unlocked.
+connection locker;
+send FLUSH TABLES WITH READ LOCK;
+--sleep 1
+#
+# This must not block.
+connection writer;
+--error 1100
+CREATE TABLE t2 AS SELECT * FROM t1;
+UNLOCK TABLES;
+#
+# This awakes now.
+connection locker;
+reap;
+UNLOCK TABLES;
+#
+connection default;
+DROP TABLE t1;
#
# Bug #17264: MySQL Server freeze
diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test
index ab865594d42..b6a552adbde 100644
--- a/mysql-test/t/mysqldump.test
+++ b/mysql-test/t/mysqldump.test
@@ -1146,6 +1146,27 @@ drop table t1, t2;
#
+# Bug#18462 mysqldump does not dump view structures correctly
+#
+#
+create table t (qty int, price int);
+insert into t values(3, 50);
+insert into t values(5, 51);
+create view v1 as select qty, price, qty*price as value from t;
+create view v2 as select qty from v1;
+--echo mysqldump {
+--exec $MYSQL_DUMP --compact -F --tab . test
+--exec cat v1.sql
+--echo } mysqldump {
+--exec cat v2.sql
+--echo } mysqldump
+--rm v.sql t.sql t.txt
+drop view v1;
+drop view v2;
+drop table t;
+
+
+#
# Bug#14857 Reading dump files with single statement stored routines fails.
# fixed by patch for bug#16878
#
@@ -1162,6 +1183,18 @@ drop function f;
drop procedure p;
#
+# Bug #17371 Unable to dump a schema with invalid views
+#
+#
+create table t1 ( id serial );
+create view v1 as select * from t1;
+drop table t1;
+# mysqldump gets 1356 from server, but gives us 2
+--echo mysqldump {
+--error 2
+--exec $MYSQL_DUMP --force -N --compact --skip-comments test
+--echo } mysqldump
+drop view v1;
# BUG#17201 Spurious 'DROP DATABASE' in output,
# also confusion between tables and views.
# Example code from Markus Popp
diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test
index e3f3e37cd4c..ff66b265fae 100644
--- a/mysql-test/t/ps.test
+++ b/mysql-test/t/ps.test
@@ -1146,4 +1146,122 @@ execute stmt;
execute stmt;
deallocate prepare stmt;
drop table t1, t2, t3;
+
+#
+# Bug#17199 "Table not found" error occurs if the query contains a call
+# to a function from another database.
+# Test prepared statements- related behaviour.
+#
+#
+# ALTER TABLE RENAME and Prepared Statements: wrong DB name buffer was used
+# in ALTER ... RENAME which caused memory corruption in prepared statements.
+# No need to fix this problem in 4.1 as ALTER TABLE is not allowed in
+# Prepared Statements in 4.1.
+#
+create database mysqltest_long_database_name_to_thrash_heap;
+use test;
+create table t1 (i int);
+prepare stmt from "alter table test.t1 rename t1";
+use mysqltest_long_database_name_to_thrash_heap;
+execute stmt;
+show tables like 't1';
+prepare stmt from "alter table test.t1 rename t1";
+use test;
+execute stmt;
+show tables like 't1';
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+deallocate prepare stmt;
+#
+# Check that a prepared statement initializes its current database at
+# PREPARE, and then works correctly even if the current database has been
+# changed.
+#
+use mysqltest_long_database_name_to_thrash_heap;
+# Necessary for preparation of INSERT/UPDATE/DELETE to succeed
+prepare stmt_create from "create table t1 (i int)";
+prepare stmt_insert from "insert into t1 (i) values (1)";
+prepare stmt_update from "update t1 set i=2";
+prepare stmt_delete from "delete from t1 where i=2";
+prepare stmt_select from "select * from t1";
+prepare stmt_alter from "alter table t1 add column (b int)";
+prepare stmt_alter1 from "alter table t1 drop column b";
+prepare stmt_analyze from "analyze table t1";
+prepare stmt_optimize from "optimize table t1";
+prepare stmt_show from "show tables like 't1'";
+prepare stmt_truncate from "truncate table t1";
+prepare stmt_drop from "drop table t1";
+# Drop the table that was used to prepare INSERT/UPDATE/DELETE: we will
+# create a new one by executing stmt_create
+drop table t1;
+# Switch the current database
+use test;
+# Check that all prepared statements operate on the database that was
+# active at PREPARE
+execute stmt_create;
+# should return empty set
+show tables like 't1';
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+use test;
+execute stmt_insert;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_update;
+select * from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_delete;
+execute stmt_select;
+execute stmt_alter;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_alter1;
+show columns from mysqltest_long_database_name_to_thrash_heap.t1;
+execute stmt_analyze;
+execute stmt_optimize;
+execute stmt_show;
+execute stmt_truncate;
+execute stmt_drop;
+show tables like 't1';
+use mysqltest_long_database_name_to_thrash_heap;
+show tables like 't1';
+#
+# Attempt a statement PREPARE when there is no current database:
+# is expected to return an error.
+#
+drop database mysqltest_long_database_name_to_thrash_heap;
+--error ER_NO_DB_ERROR
+prepare stmt_create from "create table t1 (i int)";
+--error ER_NO_DB_ERROR
+prepare stmt_insert from "insert into t1 (i) values (1)";
+--error ER_NO_DB_ERROR
+prepare stmt_update from "update t1 set i=2";
+--error ER_NO_DB_ERROR
+prepare stmt_delete from "delete from t1 where i=2";
+--error ER_NO_DB_ERROR
+prepare stmt_select from "select * from t1";
+--error ER_NO_DB_ERROR
+prepare stmt_alter from "alter table t1 add column (b int)";
+--error ER_NO_DB_ERROR
+prepare stmt_alter1 from "alter table t1 drop column b";
+--error ER_NO_DB_ERROR
+prepare stmt_analyze from "analyze table t1";
+--error ER_NO_DB_ERROR
+prepare stmt_optimize from "optimize table t1";
+--error ER_NO_DB_ERROR
+prepare stmt_show from "show tables like 't1'";
+--error ER_NO_DB_ERROR
+prepare stmt_truncate from "truncate table t1";
+--error ER_NO_DB_ERROR
+prepare stmt_drop from "drop table t1";
+#
+# The above has automatically deallocated all our statements.
+#
+# Attempt to CREATE a temporary table when no DB used: it should fail
+# This proves that no table can be used without explicit specification of
+# its database if there is no current database.
+#
+--error ER_NO_DB_ERROR
+create temporary table t1 (i int);
+#
+# Restore the old environemnt
+#
+use test;
# End of 5.0 tests
diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test
index 66498198157..25c96042e6f 100644
--- a/mysql-test/t/sp.test
+++ b/mysql-test/t/sp.test
@@ -5889,6 +5889,52 @@ DROP PROCEDURE bug18037_p1|
DROP PROCEDURE bug18037_p2|
#
+# Bug#17199: "Table not found" error occurs if the query contains a call
+# to a function from another database.
+# See also ps.test for an additional test case for this bug.
+#
+use test|
+create table t3 (i int)|
+insert into t3 values (1), (2)|
+create database mysqltest1|
+use mysqltest1|
+create function bug17199() returns varchar(2) deterministic return 'ok'|
+use test|
+select *, mysqltest1.bug17199() from t3|
+#
+# Bug#18444: Fully qualified stored function names don't work correctly
+# in select statements
+#
+use mysqltest1|
+create function bug18444(i int) returns int no sql deterministic return i + 1|
+use test|
+select mysqltest1.bug18444(i) from t3|
+drop database mysqltest1|
+#
+# Check that current database has no influence to a stored procedure
+#
+create database mysqltest1 charset=utf8|
+create database mysqltest2 charset=utf8|
+create procedure mysqltest1.p1()
+begin
+-- alters the default collation of database test
+ alter database character set koi8r;
+end|
+use mysqltest1|
+call p1()|
+show create database mysqltest1|
+show create database mysqltest2|
+alter database mysqltest1 character set utf8|
+use mysqltest2|
+call mysqltest1.p1()|
+show create database mysqltest1|
+show create database mysqltest2|
+drop database mysqltest1|
+drop database mysqltest2|
+#
+# Restore the old environemnt
+use test|
+#
# Bug#15217 "Using a SP cursor on a table created with PREPARE fails with
# weird error". Check that the code that is supposed to work at
# the first execution of a stored procedure actually works for
diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test
index f96beedbebc..ddfc3f11665 100644
--- a/mysql-test/t/type_timestamp.test
+++ b/mysql-test/t/type_timestamp.test
@@ -6,6 +6,9 @@
drop table if exists t1,t2;
--enable_warnings
+# Set timezone to GMT-3, to make it possible to use "interval 3 hour"
+set time_zone="+03:00";
+
CREATE TABLE t1 (a int, t timestamp);
CREATE TABLE t2 (a int, t datetime);
SET TIMESTAMP=1234;
@@ -322,3 +325,6 @@ select * from t1;
drop table t1;
# End of 4.1 tests
+
+# Restore timezone to default
+set time_zone= @@global.time_zone;
diff --git a/mysql-test/t/udf.test b/mysql-test/t/udf.test
index e2556692612..f3be08c8537 100644
--- a/mysql-test/t/udf.test
+++ b/mysql-test/t/udf.test
@@ -99,6 +99,17 @@ delimiter ;//
call XXX2();
drop procedure xxx2;
+#
+# Bug#19904: UDF: not initialized *is_null per row
+#
+
+CREATE TABLE bug19904(n INT, v varchar(10));
+INSERT INTO bug19904 VALUES (1,'one'),(2,'two'),(NULL,NULL),(3,'three'),(4,'four');
+SELECT myfunc_double(n) AS f FROM bug19904;
+SELECT metaphon(v) AS f FROM bug19904;
+DROP TABLE bug19904;
+
+--echo End of 5.0 tests.
#
# Drop the example functions from udf_example
@@ -114,3 +125,4 @@ DROP FUNCTION lookup;
DROP FUNCTION reverse_lookup;
DROP FUNCTION avgcost;
+
diff --git a/mysys/mf_dirname.c b/mysys/mf_dirname.c
index 9206aa28078..4d78f039799 100644
--- a/mysys/mf_dirname.c
+++ b/mysys/mf_dirname.c
@@ -72,7 +72,9 @@ uint dirname_part(my_string to, const char *name)
SYNPOSIS
convert_dirname()
- to Store result here
+ to Store result here. Must be at least of size
+ min(FN_REFLEN, strlen(from) + 1) to make room
+ for adding FN_LIBCHAR at the end.
from Original filename
from_end Pointer at end of filename (normally end \0)
diff --git a/mysys/my_lib.c b/mysys/my_lib.c
index 03f2d91916d..1c5630ad14e 100644
--- a/mysys/my_lib.c
+++ b/mysys/my_lib.c
@@ -501,7 +501,7 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
if (!(MyFlags & MY_DONT_SORT))
qsort((void *) result->dir_entry, result->number_off_files,
sizeof(FILEINFO), (qsort_cmp) comp_names);
- DBUG_PRINT(exit, ("found %d files", result->number_off_files));
+ DBUG_PRINT("exit", ("found %d files", result->number_off_files));
DBUG_RETURN(result);
error:
my_errno=errno;
diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c
index 3f601a42dc9..f33db2655c4 100644
--- a/mysys/my_malloc.c
+++ b/mysys/my_malloc.c
@@ -83,7 +83,7 @@ char *my_strdup(const char *from, myf my_flags)
}
-char *my_strdup_with_length(const byte *from, uint length, myf my_flags)
+char *my_strdup_with_length(const char *from, uint length, myf my_flags)
{
gptr ptr;
if ((ptr=my_malloc(length+1,my_flags)) != 0)
diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c
index 6cdf98c5f5f..f6d6644859e 100644
--- a/mysys/safemalloc.c
+++ b/mysys/safemalloc.c
@@ -525,7 +525,7 @@ char *_my_strdup(const char *from, const char *filename, uint lineno,
} /* _my_strdup */
-char *_my_strdup_with_length(const byte *from, uint length,
+char *_my_strdup_with_length(const char *from, uint length,
const char *filename, uint lineno,
myf MyFlags)
{
diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c
index f5a8b618949..51df50a4926 100644
--- a/mysys/thr_lock.c
+++ b/mysys/thr_lock.c
@@ -204,6 +204,8 @@ static void check_locks(THR_LOCK *lock, const char *where,
{
if ((int) data->type == (int) TL_READ_NO_INSERT)
count++;
+ /* Protect against infinite loop. */
+ DBUG_ASSERT(count <= lock->read_no_write_count);
}
if (count != lock->read_no_write_count)
{
diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h
index d60f7a2c582..ca82806f4b1 100644
--- a/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/ndb/include/kernel/GlobalSignalNumbers.h
@@ -607,8 +607,6 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_WAIT_GCP_REF 500
#define GSN_WAIT_GCP_CONF 501
-/* 502 not used */
-
/**
* Trigger and index signals
*/
@@ -678,6 +676,8 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_BACKUP_FRAGMENT_REF 546
#define GSN_BACKUP_FRAGMENT_CONF 547
+#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 575
+
#define GSN_STOP_BACKUP_REQ 548
#define GSN_STOP_BACKUP_REF 549
#define GSN_STOP_BACKUP_CONF 550
@@ -727,7 +727,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_SUB_STOP_REQ 572
#define GSN_SUB_STOP_REF 573
#define GSN_SUB_STOP_CONF 574
-/* 575 unused */
+/* 575 used */
#define GSN_SUB_CREATE_REQ 576
#define GSN_SUB_CREATE_REF 577
#define GSN_SUB_CREATE_CONF 578
diff --git a/ndb/include/kernel/signaldata/BackupContinueB.hpp b/ndb/include/kernel/signaldata/BackupContinueB.hpp
index d3d3f79f310..fe3f48444ec 100644
--- a/ndb/include/kernel/signaldata/BackupContinueB.hpp
+++ b/ndb/include/kernel/signaldata/BackupContinueB.hpp
@@ -31,7 +31,8 @@ private:
BUFFER_UNDERFLOW = 1,
BUFFER_FULL_SCAN = 2,
BUFFER_FULL_FRAG_COMPLETE = 3,
- BUFFER_FULL_META = 4
+ BUFFER_FULL_META = 4,
+ BACKUP_FRAGMENT_INFO = 5
};
};
diff --git a/ndb/include/kernel/signaldata/BackupImpl.hpp b/ndb/include/kernel/signaldata/BackupImpl.hpp
index 298440ad377..07ab5bc543b 100644
--- a/ndb/include/kernel/signaldata/BackupImpl.hpp
+++ b/ndb/include/kernel/signaldata/BackupImpl.hpp
@@ -258,15 +258,31 @@ class BackupFragmentConf {
friend bool printBACKUP_FRAGMENT_CONF(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 6 );
+ STATIC_CONST( SignalLength = 8 );
private:
Uint32 backupId;
Uint32 backupPtr;
Uint32 tableId;
Uint32 fragmentNo;
- Uint32 noOfRecords;
- Uint32 noOfBytes;
+ Uint32 noOfRecordsLow;
+ Uint32 noOfBytesLow;
+ Uint32 noOfRecordsHigh;
+ Uint32 noOfBytesHigh;
+};
+
+class BackupFragmentCompleteRep {
+public:
+ STATIC_CONST( SignalLength = 8 );
+
+ Uint32 backupId;
+ Uint32 backupPtr;
+ Uint32 tableId;
+ Uint32 fragmentNo;
+ Uint32 noOfTableRowsLow;
+ Uint32 noOfFragmentRowsLow;
+ Uint32 noOfTableRowsHigh;
+ Uint32 noOfFragmentRowsHigh;
};
class StopBackupReq {
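The larger SignalLength comes from carrying each 64-bit counter as a Low/High pair of 32-bit signal words. The split and the recombination used throughout the rest of the patch are plain shift/mask arithmetic; a minimal stand-alone sketch (local typedefs stand in for the NDB ones):

    typedef unsigned int       Uint32;
    typedef unsigned long long Uint64;

    static void split64(Uint64 value, Uint32 & low, Uint32 & high)
    {
      low  = (Uint32)(value & 0xFFFFFFFF);   /* bits  0..31 */
      high = (Uint32)(value >> 32);          /* bits 32..63 */
    }

    static Uint64 join64(Uint32 low, Uint32 high)
    {
      return low + (((Uint64)high) << 32);   /* same expression the patch uses */
    }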
diff --git a/ndb/include/kernel/signaldata/BackupSignalData.hpp b/ndb/include/kernel/signaldata/BackupSignalData.hpp
index e1b8c6203a1..9e34ea3a211 100644
--- a/ndb/include/kernel/signaldata/BackupSignalData.hpp
+++ b/ndb/include/kernel/signaldata/BackupSignalData.hpp
@@ -201,17 +201,19 @@ class BackupCompleteRep {
friend bool printBACKUP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 8 + NdbNodeBitmask::Size );
+ STATIC_CONST( SignalLength = 10 + NdbNodeBitmask::Size );
private:
Uint32 senderData;
Uint32 backupId;
Uint32 startGCP;
Uint32 stopGCP;
- Uint32 noOfBytes;
- Uint32 noOfRecords;
+ Uint32 noOfBytesLow;
+ Uint32 noOfRecordsLow;
Uint32 noOfLogBytes;
Uint32 noOfLogRecords;
NdbNodeBitmask nodes;
+ Uint32 noOfBytesHigh;
+ Uint32 noOfRecordsHigh;
};
/**
diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp
index bc4817f0cf3..0a7f6aa3fb3 100644
--- a/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -117,9 +117,16 @@ public:
CustomTriggerId = 25,
FrmLen = 26,
FrmData = 27,
+
FragmentCount = 128, // No of fragments in table (!fragment replicas)
FragmentDataLen = 129,
FragmentData = 130, // CREATE_FRAGMENTATION reply
+
+ MaxRowsLow = 139,
+ MaxRowsHigh = 140,
+ MinRowsLow = 143,
+ MinRowsHigh = 144,
+
TableEnd = 999,
AttributeName = 1000, // String, Mandatory
@@ -263,6 +270,10 @@ public:
Uint32 FragmentCount;
Uint32 FragmentDataLen;
Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2];
+ Uint32 MaxRowsLow;
+ Uint32 MaxRowsHigh;
+ Uint32 MinRowsLow;
+ Uint32 MinRowsHigh;
void init();
};
diff --git a/ndb/include/kernel/signaldata/LqhFrag.hpp b/ndb/include/kernel/signaldata/LqhFrag.hpp
index 13dfafcc653..50b0caaba07 100644
--- a/ndb/include/kernel/signaldata/LqhFrag.hpp
+++ b/ndb/include/kernel/signaldata/LqhFrag.hpp
@@ -104,7 +104,7 @@ class LqhFragReq {
friend bool printLQH_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 25 );
+ STATIC_CONST( SignalLength = 24 );
enum RequestInfo {
CreateInRunning = 0x8000000,
@@ -115,27 +115,32 @@ private:
Uint32 senderData;
Uint32 senderRef;
Uint32 fragmentId;
- Uint32 requestInfo;
+ Uint8 requestInfo;
+ Uint8 unused1;
+ Uint16 noOfAttributes;
Uint32 tableId;
Uint32 localKeyLength;
- Uint32 maxLoadFactor;
- Uint32 minLoadFactor;
- Uint32 kValue;
+ Uint16 maxLoadFactor;
+ Uint16 minLoadFactor;
+ Uint16 kValue;
+ Uint8 tableType; // DictTabInfo::TableType
+ Uint8 GCPIndicator;
Uint32 lh3DistrBits;
Uint32 lh3PageBits;
- Uint32 noOfAttributes;
Uint32 noOfNullAttributes;
- Uint32 noOfPagesToPreAllocate;
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
Uint32 schemaVersion;
Uint32 keyLength;
Uint32 nextLCP;
Uint32 noOfKeyAttr;
- Uint32 noOfNewAttr; // noOfCharsets in upper half
+ Uint16 noOfNewAttr;
+ Uint16 noOfCharsets;
Uint32 checksumIndicator;
Uint32 noOfAttributeGroups;
- Uint32 GCPIndicator;
Uint32 startGci;
- Uint32 tableType; // DictTabInfo::TableType
Uint32 primaryTableId; // table of index or RNIL
};
diff --git a/ndb/include/kernel/signaldata/TupFrag.hpp b/ndb/include/kernel/signaldata/TupFrag.hpp
index 5fb9d7bcf42..c9f2ad5382f 100644
--- a/ndb/include/kernel/signaldata/TupFrag.hpp
+++ b/ndb/include/kernel/signaldata/TupFrag.hpp
@@ -30,7 +30,7 @@ class TupFragReq {
friend class Dblqh;
friend class Dbtup;
public:
- STATIC_CONST( SignalLength = 14 );
+ STATIC_CONST( SignalLength = 17 );
private:
Uint32 userPtr;
Uint32 userRef;
@@ -38,7 +38,18 @@ private:
Uint32 tableId;
Uint32 noOfAttr;
Uint32 fragId;
- Uint32 todo[8];
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
+ Uint32 noOfNullAttr;
+ Uint32 schemaVersion;
+ Uint32 noOfKeyAttr;
+ Uint16 noOfNewAttr;
+ Uint16 noOfCharsets;
+ Uint32 checksumIndicator;
+ Uint32 noOfAttributeGroups;
+ Uint32 globalCheckpointIdIndicator;
};
class TupFragConf {
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
index 1413931035d..e67a0253096 100644
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/ndb/include/ndbapi/NdbDictionary.hpp
@@ -722,6 +722,20 @@ public:
*/
void setObjectType(Object::Type type);
+ /**
+ * Set/Get Maximum number of rows in table (only used to calculate
+ * number of partitions).
+ */
+ void setMaxRows(Uint64 maxRows);
+ Uint64 getMaxRows() const;
+
+ /**
+ * Set/Get Minimum number of rows in table (only used to calculate
+ * number of partitions).
+ */
+ void setMinRows(Uint64 minRows);
+ Uint64 getMinRows() const;
+
/** @} *******************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
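setMaxRows()/setMinRows() are hints the kernel uses when sizing fragments (partitions). A hedged usage sketch follows; the dictionary and column calls are existing NdbDictionary API, while the table layout, row counts, and the caller-supplied dict pointer are made up for illustration:

    #include <NdbApi.hpp>

    static int create_hinted_table(NdbDictionary::Dictionary * dict)
    {
      NdbDictionary::Table tab("t1");          /* table name is illustrative */

      NdbDictionary::Column col("id");
      col.setType(NdbDictionary::Column::Unsigned);
      col.setPrimaryKey(true);
      tab.addColumn(col);

      tab.setMaxRows((Uint64)100000000);       /* expected upper bound on rows */
      tab.setMinRows((Uint64)1000000);         /* expected lower bound on rows */

      return dict->createTable(tab);           /* 0 on success, -1 on error */
    }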
diff --git a/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/ndb/src/common/debugger/signaldata/BackupImpl.cpp
index e9b0188d93b..855db0834bc 100644
--- a/ndb/src/common/debugger/signaldata/BackupImpl.cpp
+++ b/ndb/src/common/debugger/signaldata/BackupImpl.cpp
@@ -100,8 +100,10 @@ printBACKUP_FRAGMENT_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){
BackupFragmentConf* sig = (BackupFragmentConf*)data;
fprintf(out, " backupPtr: %d backupId: %d\n",
sig->backupPtr, sig->backupId);
- fprintf(out, " tableId: %d fragmentNo: %d records: %d bytes: %d\n",
- sig->tableId, sig->fragmentNo, sig->noOfRecords, sig->noOfBytes);
+ fprintf(out, " tableId: %d fragmentNo: %d records: %llu bytes: %llu\n",
+ sig->tableId, sig->fragmentNo,
+ sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32),
+ sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32));
return true;
}
diff --git a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
index 4b0a0e07b66..27fed22ac72 100644
--- a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
+++ b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp
@@ -72,11 +72,11 @@ printBACKUP_ABORT_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){
bool
printBACKUP_COMPLETE_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){
BackupCompleteRep* sig = (BackupCompleteRep*)data;
- fprintf(out, " senderData: %d backupId: %d records: %d bytes: %d\n",
+ fprintf(out, " senderData: %d backupId: %d records: %llu bytes: %llu\n",
sig->senderData,
sig->backupId,
- sig->noOfRecords,
- sig->noOfBytes);
+ sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32),
+ sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32));
return true;
}
diff --git a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
index 43c129347c0..a1d8d82474d 100644
--- a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
+++ b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
@@ -48,6 +48,10 @@ DictTabInfo::TableMapping[] = {
DTIMAP(Table, FragmentCount, FragmentCount),
DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, MAX_FRAGMENT_DATA_BYTES),
DTIMAPB(Table, FragmentData, FragmentData, 0, MAX_FRAGMENT_DATA_BYTES, FragmentDataLen),
+ DTIMAP(Table, MaxRowsLow, MaxRowsLow),
+ DTIMAP(Table, MaxRowsHigh, MaxRowsHigh),
+ DTIMAP(Table, MinRowsLow, MinRowsLow),
+ DTIMAP(Table, MinRowsHigh, MinRowsHigh),
DTIBREAK(AttributeName)
};
@@ -124,6 +128,10 @@ DictTabInfo::Table::init(){
FragmentCount = 0;
FragmentDataLen = 0;
memset(FragmentData, 0, sizeof(FragmentData));
+ MaxRowsLow = 0;
+ MaxRowsHigh = 0;
+ MinRowsLow = 0;
+ MinRowsHigh = 0;
}
void
diff --git a/ndb/src/common/debugger/signaldata/LqhFrag.cpp b/ndb/src/common/debugger/signaldata/LqhFrag.cpp
index 6d727959a67..3175582c3a2 100644
--- a/ndb/src/common/debugger/signaldata/LqhFrag.cpp
+++ b/ndb/src/common/debugger/signaldata/LqhFrag.cpp
@@ -37,8 +37,10 @@ printLQH_FRAG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recB
fprintf(output, " noOfAttributes: %d noOfNullAttributes: %d keyLength: %d\n",
sig->noOfAttributes, sig->noOfNullAttributes, sig->keyLength);
- fprintf(output, " noOfPagesToPreAllocate: %d schemaVersion: %d nextLCP: %d\n",
- sig->noOfPagesToPreAllocate, sig->schemaVersion, sig->nextLCP);
+ fprintf(output, " maxRowsLow/High: %u/%u minRowsLow/High: %u/%u\n",
+ sig->maxRowsLow, sig->maxRowsHigh, sig->minRowsLow, sig->minRowsHigh);
+ fprintf(output, " schemaVersion: %d nextLCP: %d\n",
+ sig->schemaVersion, sig->nextLCP);
return true;
}
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index f9089355475..43c1de5e2b3 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -266,6 +266,65 @@ Backup::execCONTINUEB(Signal* signal)
const Uint32 Tdata2 = signal->theData[2];
switch(Tdata0) {
+ case BackupContinueB::BACKUP_FRAGMENT_INFO:
+ {
+ const Uint32 ptr_I = Tdata1;
+ Uint32 tabPtr_I = Tdata2;
+ Uint32 fragPtr_I = signal->theData[3];
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, ptr_I);
+ TablePtr tabPtr;
+ ptr.p->tables.getPtr(tabPtr, tabPtr_I);
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I);
+
+ BackupFilePtr filePtr;
+ ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
+
+ const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2;
+ Uint32 * dst;
+ if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz))
+ {
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 4);
+ return;
+ }
+
+ BackupFormat::CtlFile::FragmentInfo * fragInfo =
+ (BackupFormat::CtlFile::FragmentInfo*)dst;
+ fragInfo->SectionType = htonl(BackupFormat::FRAGMENT_INFO);
+ fragInfo->SectionLength = htonl(sz);
+ fragInfo->TableId = htonl(fragPtr.p->tableId);
+ fragInfo->FragmentNo = htonl(fragPtr_I);
+ fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF);
+ fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32);
+ fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF);
+ fragInfo->FilePosHigh = htonl(0 >> 32);
+
+ filePtr.p->operation.dataBuffer.updateWritePtr(sz);
+
+ fragPtr_I++;
+ if (fragPtr_I == tabPtr.p->fragments.getSize())
+ {
+ signal->theData[0] = tabPtr.p->tableId;
+ signal->theData[1] = 0; // unlock
+ EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
+
+ fragPtr_I = 0;
+ ptr.p->tables.next(tabPtr);
+ if ((tabPtr_I = tabPtr.i) == RNIL)
+ {
+ closeFiles(signal, ptr);
+ return;
+ }
+ }
+ signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO;
+ signal->theData[1] = ptr_I;
+ signal->theData[2] = tabPtr_I;
+ signal->theData[3] = fragPtr_I;
+ sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+ }
case BackupContinueB::START_FILE_THREAD:
case BackupContinueB::BUFFER_UNDERFLOW:
{
@@ -455,7 +514,7 @@ Backup::findTable(const BackupRecordPtr & ptr,
return false;
}
-static Uint32 xps(Uint32 x, Uint64 ms)
+static Uint32 xps(Uint64 x, Uint64 ms)
{
float fx = x;
float fs = ms;
@@ -469,9 +528,9 @@ static Uint32 xps(Uint32 x, Uint64 ms)
}
struct Number {
- Number(Uint32 r) { val = r;}
- Number & operator=(Uint32 r) { val = r; return * this; }
- Uint32 val;
+ Number(Uint64 r) { val = r;}
+ Number & operator=(Uint64 r) { val = r; return * this; }
+ Uint64 val;
};
NdbOut &
@@ -545,8 +604,10 @@ Backup::execBACKUP_COMPLETE_REP(Signal* signal)
startTime = NdbTick_CurrentMillisecond() - startTime;
ndbout_c("Backup %d has completed", rep->backupId);
- const Uint32 bytes = rep->noOfBytes;
- const Uint32 records = rep->noOfRecords;
+ const Uint64 bytes =
+ rep->noOfBytesLow + (((Uint64)rep->noOfBytesHigh) << 32);
+ const Uint64 records =
+ rep->noOfRecordsLow + (((Uint64)rep->noOfRecordsHigh) << 32);
Number rps = xps(records, startTime);
Number bps = xps(bytes, startTime);
@@ -1905,8 +1966,10 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
const Uint32 tableId = conf->tableId;
const Uint32 fragmentNo = conf->fragmentNo;
const Uint32 nodeId = refToNode(signal->senderBlockRef());
- const Uint32 noOfBytes = conf->noOfBytes;
- const Uint32 noOfRecords = conf->noOfRecords;
+ const Uint64 noOfBytes =
+ conf->noOfBytesLow + (((Uint64)conf->noOfBytesHigh) << 32);
+ const Uint64 noOfRecords =
+ conf->noOfRecordsLow + (((Uint64)conf->noOfRecordsHigh) << 32);
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptrI);
@@ -1918,9 +1981,13 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, tableId));
+ tabPtr.p->noOfRecords += noOfRecords;
+
FragmentPtr fragPtr;
tabPtr.p->fragments.getPtr(fragPtr, fragmentNo);
+ fragPtr.p->noOfRecords = noOfRecords;
+
ndbrequire(fragPtr.p->scanned == 0);
ndbrequire(fragPtr.p->scanning == 1);
ndbrequire(fragPtr.p->node == nodeId);
@@ -1944,6 +2011,24 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal)
}
else
{
+ NodeBitmask nodes = ptr.p->nodes;
+ nodes.clear(getOwnNodeId());
+ if (!nodes.isclear())
+ {
+ BackupFragmentCompleteRep *rep =
+ (BackupFragmentCompleteRep*)signal->getDataPtrSend();
+ rep->backupId = ptr.p->backupId;
+ rep->backupPtr = ptr.i;
+ rep->tableId = tableId;
+ rep->fragmentNo = fragmentNo;
+ rep->noOfTableRowsLow = (Uint32)(tabPtr.p->noOfRecords & 0xFFFFFFFF);
+ rep->noOfTableRowsHigh = (Uint32)(tabPtr.p->noOfRecords >> 32);
+ rep->noOfFragmentRowsLow = (Uint32)(noOfRecords & 0xFFFFFFFF);
+ rep->noOfFragmentRowsHigh = (Uint32)(noOfRecords >> 32);
+ NodeReceiverGroup rg(BACKUP, ptr.p->nodes);
+ sendSignal(rg, GSN_BACKUP_FRAGMENT_COMPLETE_REP, signal,
+ BackupFragmentCompleteRep::SignalLength, JBB);
+ }
nextFragment(signal, ptr);
}
}
@@ -2006,6 +2091,29 @@ err:
execABORT_BACKUP_ORD(signal);
}
+void
+Backup::execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal)
+{
+ jamEntry();
+ BackupFragmentCompleteRep * rep =
+ (BackupFragmentCompleteRep*)signal->getDataPtr();
+
+ BackupRecordPtr ptr;
+ c_backupPool.getPtr(ptr, rep->backupPtr);
+
+ TablePtr tabPtr;
+ ndbrequire(findTable(ptr, tabPtr, rep->tableId));
+
+ tabPtr.p->noOfRecords =
+ rep->noOfTableRowsLow + (((Uint64)rep->noOfTableRowsHigh) << 32);
+
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, rep->fragmentNo);
+
+ fragPtr.p->noOfRecords =
+ rep->noOfFragmentRowsLow + (((Uint64)rep->noOfFragmentRowsHigh) << 32);
+}
+
/*****************************************************************************
*
* Master functionallity - Drop triggers
@@ -2206,8 +2314,10 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
rep->senderData = ptr.p->clientData;
rep->startGCP = ptr.p->startGCP;
rep->stopGCP = ptr.p->stopGCP;
- rep->noOfBytes = ptr.p->noOfBytes;
- rep->noOfRecords = ptr.p->noOfRecords;
+ rep->noOfBytesLow = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF);
+ rep->noOfRecordsLow = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF);
+ rep->noOfBytesHigh = (Uint32)(ptr.p->noOfBytes >> 32);
+ rep->noOfRecordsHigh = (Uint32)(ptr.p->noOfRecords >> 32);
rep->noOfLogBytes = ptr.p->noOfLogBytes;
rep->noOfLogRecords = ptr.p->noOfLogRecords;
rep->nodes = ptr.p->nodes;
@@ -2220,12 +2330,14 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
signal->theData[2] = ptr.p->backupId;
signal->theData[3] = ptr.p->startGCP;
signal->theData[4] = ptr.p->stopGCP;
- signal->theData[5] = ptr.p->noOfBytes;
- signal->theData[6] = ptr.p->noOfRecords;
+ signal->theData[5] = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF);
+ signal->theData[6] = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF);
signal->theData[7] = ptr.p->noOfLogBytes;
signal->theData[8] = ptr.p->noOfLogRecords;
ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9);
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB);
+ signal->theData[9+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfBytes >> 32);
+ signal->theData[10+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfRecords >> 32);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 11+NdbNodeBitmask::Size, JBB);
}
else
{
@@ -2988,6 +3100,7 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len)
/**
* Initialize table object
*/
+ tabPtr.p->noOfRecords = 0;
tabPtr.p->schemaVersion = tmpTab.TableVersion;
tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes;
tabPtr.p->noOfNull = 0;
@@ -3695,8 +3808,10 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr)
conf->backupPtr = ptr.i;
conf->tableId = filePtr.p->tableId;
conf->fragmentNo = filePtr.p->fragmentNo;
- conf->noOfRecords = op.noOfRecords;
- conf->noOfBytes = op.noOfBytes;
+ conf->noOfRecordsLow = (Uint32)(op.noOfRecords & 0xFFFFFFFF);
+ conf->noOfRecordsHigh = (Uint32)(op.noOfRecords >> 32);
+ conf->noOfBytesLow = (Uint32)(op.noOfBytes & 0xFFFFFFFF);
+ conf->noOfBytesHigh = (Uint32)(op.noOfBytes >> 32);
sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal,
BackupFragmentConf::SignalLength, JBB);
@@ -4123,20 +4238,18 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal)
gcp->StartGCP = htonl(startGCP);
gcp->StopGCP = htonl(stopGCP - 1);
filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz);
- }
- {
- TablePtr tabPtr;
- for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;
- ptr.p->tables.next(tabPtr))
{
- signal->theData[0] = tabPtr.p->tableId;
- signal->theData[1] = 0; // unlock
- EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2);
+ TablePtr tabPtr;
+ ptr.p->tables.first(tabPtr);
+
+ signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO;
+ signal->theData[1] = ptr.i;
+ signal->theData[2] = tabPtr.i;
+ signal->theData[3] = 0;
+ sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB);
}
}
-
- closeFiles(signal, ptr);
}
void
diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp
index c455e32fa67..e37923da749 100644
--- a/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -68,6 +68,7 @@ protected:
void execBACKUP_DATA(Signal* signal);
void execSTART_BACKUP_REQ(Signal* signal);
void execBACKUP_FRAGMENT_REQ(Signal* signal);
+ void execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal);
void execSTOP_BACKUP_REQ(Signal* signal);
void execBACKUP_STATUS_REQ(Signal* signal);
void execABORT_BACKUP_ORD(Signal* signal);
@@ -183,10 +184,12 @@ public:
typedef Ptr<Attribute> AttributePtr;
struct Fragment {
+ Uint64 noOfRecords;
Uint32 tableId;
- Uint32 node;
- Uint16 scanned; // 0 = not scanned x = scanned by node x
- Uint16 scanning; // 0 = not scanning x = scanning on node x
+ Uint8 node;
+ Uint8 scanned; // 0 = not scanned x = scanned by node x
+ Uint8 scanning; // 0 = not scanning x = scanning on node x
+ Uint8 unused1;
Uint32 nextPool;
};
typedef Ptr<Fragment> FragmentPtr;
@@ -194,6 +197,8 @@ public:
struct Table {
Table(ArrayPool<Attribute> &, ArrayPool<Fragment> &);
+ Uint64 noOfRecords;
+
Uint32 tableId;
Uint32 schemaVersion;
Uint32 tableType;
@@ -269,8 +274,8 @@ public:
Uint32 tablePtr; // Ptr.i to current table
FsBuffer dataBuffer;
- Uint32 noOfRecords;
- Uint32 noOfBytes;
+ Uint64 noOfRecords;
+ Uint64 noOfBytes;
Uint32 maxRecordSize;
private:
diff --git a/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/ndb/src/kernel/blocks/backup/BackupFormat.hpp
index 65dd2ad9053..b8ffff3a294 100644
--- a/ndb/src/kernel/blocks/backup/BackupFormat.hpp
+++ b/ndb/src/kernel/blocks/backup/BackupFormat.hpp
@@ -32,7 +32,8 @@ struct BackupFormat {
FRAGMENT_FOOTER = 3,
TABLE_LIST = 4,
TABLE_DESCRIPTION = 5,
- GCP_ENTRY = 6
+ GCP_ENTRY = 6,
+ FRAGMENT_INFO = 7
};
struct FileHeader {
@@ -126,6 +127,20 @@ struct BackupFormat {
Uint32 StartGCP;
Uint32 StopGCP;
};
+
+ /**
+ * Fragment Info
+ */
+ struct FragmentInfo {
+ Uint32 SectionType;
+ Uint32 SectionLength;
+ Uint32 TableId;
+ Uint32 FragmentNo;
+ Uint32 NoOfRecordsLow;
+ Uint32 NoOfRecordsHigh;
+ Uint32 FilePosLow;
+ Uint32 FilePosHigh;
+ };
};
/**
diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp
index 4c734d58c8e..96c11468939 100644
--- a/ndb/src/kernel/blocks/backup/BackupInit.cpp
+++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp
@@ -97,6 +97,9 @@ Backup::Backup(const Configuration & conf) :
addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ);
addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF);
addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF);
+
+ addRecSignal(GSN_BACKUP_FRAGMENT_COMPLETE_REP,
+ &Backup::execBACKUP_FRAGMENT_COMPLETE_REP);
addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ);
addRecSignal(GSN_STOP_BACKUP_REF, &Backup::execSTOP_BACKUP_REF);
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 3cdba251492..133b4d75d8e 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -286,6 +286,10 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
+ w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow);
+ w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh);
+ w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow);
+ w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh);
if(!signal)
{
@@ -1535,6 +1539,10 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->minLoadFactor = 70;
tablePtr.p->noOfPrimkey = 1;
tablePtr.p->tupKeyLength = 1;
+ tablePtr.p->maxRowsLow = 0;
+ tablePtr.p->maxRowsHigh = 0;
+ tablePtr.p->minRowsLow = 0;
+ tablePtr.p->minRowsHigh = 0;
tablePtr.p->storedTable = true;
tablePtr.p->tableType = DictTabInfo::UserTable;
tablePtr.p->primaryTableId = RNIL;
@@ -4501,6 +4509,13 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
Uint32 lhPageBits = 0;
::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount);
+ Uint64 maxRows = tabPtr.p->maxRowsLow +
+ (((Uint64)tabPtr.p->maxRowsHigh) << 32);
+ Uint64 minRows = tabPtr.p->minRowsLow +
+ (((Uint64)tabPtr.p->minRowsHigh) << 32);
+ maxRows = (maxRows + fragCount - 1) / fragCount;
+ minRows = (minRows + fragCount - 1) / fragCount;
+
{
LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend();
req->senderData = senderData;
@@ -4516,7 +4531,10 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
req->lh3PageBits = 0; //lhPageBits;
req->noOfAttributes = tabPtr.p->noOfAttributes;
req->noOfNullAttributes = tabPtr.p->noOfNullBits;
- req->noOfPagesToPreAllocate = 0;
+ req->maxRowsLow = maxRows & 0xFFFFFFFF;
+ req->maxRowsHigh = maxRows >> 32;
+ req->minRowsLow = minRows & 0xFFFFFFFF;
+ req->minRowsHigh = minRows >> 32;
req->schemaVersion = tabPtr.p->tableVersion;
Uint32 keyLen = tabPtr.p->tupKeyLength;
req->keyLength = keyLen; // wl-2066 no more "long keys"
@@ -4524,8 +4542,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
req->noOfNewAttr = 0;
- // noOfCharsets passed to TUP in upper half
- req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
+ req->noOfCharsets = tabPtr.p->noOfCharsets;
req->checksumIndicator = 1;
req->noOfAttributeGroups = 1;
req->GCPIndicator = 0;
@@ -5091,6 +5108,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
tablePtr.p->kValue = tableDesc.TableKValue;
tablePtr.p->fragmentCount = tableDesc.FragmentCount;
+ tablePtr.p->maxRowsLow = tableDesc.MaxRowsLow;
+ tablePtr.p->maxRowsHigh = tableDesc.MaxRowsHigh;
+ tablePtr.p->minRowsLow = tableDesc.MinRowsLow;
+ tablePtr.p->minRowsHigh = tableDesc.MinRowsHigh;
+
+ Uint64 maxRows =
+ (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow;
+ Uint64 minRows =
+ (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow;
tablePtr.p->frmLen = tableDesc.FrmLen;
memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
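execADD_FRAGREQ now spreads the table-level MaxRows/MinRows hints over the fragments with a rounding-up division, so the per-fragment estimates never sum to less than the table-level figure. A small illustration of that ceiling division (helper name and values are made up):

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t Uint64;
    typedef uint32_t Uint32;

    // Rows per fragment, rounded up so fragCount * result >= totalRows.
    static Uint64 rows_per_fragment(Uint64 totalRows, Uint32 fragCount)
    {
      return (totalRows + fragCount - 1) / fragCount;
    }

    int main()
    {
      // 1,000,001 rows over 4 fragments -> 250,001 per fragment, not 250,000.
      printf("%llu\n", (unsigned long long)rows_per_fragment(1000001ULL, 4));
      return 0;
    }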
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 9c0bf65b69c..91e57720d01 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -134,6 +134,10 @@ public:
* on disk. Index trigger ids are volatile.
*/
struct TableRecord : public MetaData::Table {
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
/****************************************************
* Support variables for table handling
****************************************************/
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 1ed383853ba..f8e6292f7f2 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -443,7 +443,6 @@ public:
UintR dictConnectptr;
UintR fragmentPtr;
UintR nextAddfragrec;
- UintR noOfAllocPages;
UintR schemaVer;
UintR tup1Connectptr;
UintR tup2Connectptr;
@@ -465,12 +464,17 @@ public:
Uint16 totalAttrReceived;
Uint16 fragCopyCreation;
Uint16 noOfKeyAttr;
- Uint32 noOfNewAttr; // noOfCharsets in upper half
+ Uint16 noOfNewAttr;
+ Uint16 noOfCharsets;
Uint16 noOfAttributeGroups;
Uint16 lh3DistrBits;
Uint16 tableType;
Uint16 primaryTableId;
- };// Size 108 bytes
+ Uint32 maxRowsLow;
+ Uint32 maxRowsHigh;
+ Uint32 minRowsLow;
+ Uint32 minRowsHigh;
+ };// Size 124 bytes
typedef Ptr<AddFragRecord> AddFragRecordPtr;
/* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 56e93e6ee01..ecb67d04050 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -939,12 +939,16 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
Uint8 tlh = req->lh3PageBits;
Uint32 tnoOfAttr = req->noOfAttributes;
Uint32 tnoOfNull = req->noOfNullAttributes;
- Uint32 noOfAlloc = req->noOfPagesToPreAllocate;
+ Uint32 maxRowsLow = req->maxRowsLow;
+ Uint32 maxRowsHigh = req->maxRowsHigh;
+ Uint32 minRowsLow = req->minRowsLow;
+ Uint32 minRowsHigh = req->minRowsHigh;
Uint32 tschemaVersion = req->schemaVersion;
Uint32 ttupKeyLength = req->keyLength;
Uint32 nextLcp = req->nextLCP;
Uint32 noOfKeyAttr = req->noOfKeyAttr;
Uint32 noOfNewAttr = req->noOfNewAttr;
+ Uint32 noOfCharsets = req->noOfCharsets;
Uint32 checksumIndicator = req->checksumIndicator;
Uint32 noOfAttributeGroups = req->noOfAttributeGroups;
Uint32 gcpIndicator = req->GCPIndicator;
@@ -1042,7 +1046,10 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
addfragptr.p->m_senderAttrPtr = RNIL;
addfragptr.p->noOfAttr = tnoOfAttr;
addfragptr.p->noOfNull = tnoOfNull;
- addfragptr.p->noOfAllocPages = noOfAlloc;
+ addfragptr.p->maxRowsLow = maxRowsLow;
+ addfragptr.p->maxRowsHigh = maxRowsHigh;
+ addfragptr.p->minRowsLow = minRowsLow;
+ addfragptr.p->minRowsHigh = minRowsHigh;
addfragptr.p->tabId = tabptr.i;
addfragptr.p->totalAttrReceived = 0;
addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */
@@ -1052,6 +1059,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
addfragptr.p->addfragErrorCode = 0;
addfragptr.p->noOfKeyAttr = noOfKeyAttr;
addfragptr.p->noOfNewAttr = noOfNewAttr;
+ addfragptr.p->noOfCharsets = noOfCharsets;
addfragptr.p->checksumIndicator = checksumIndicator;
addfragptr.p->noOfAttributeGroups = noOfAttributeGroups;
addfragptr.p->GCPIndicator = gcpIndicator;
@@ -1221,47 +1229,56 @@ Dblqh::sendAddFragReq(Signal* signal)
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ||
addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUP) {
+ TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
if (DictTabInfo::isTable(addfragptr.p->tableType) ||
DictTabInfo::isHashIndex(addfragptr.p->tableType)) {
jam();
- signal->theData[0] = addfragptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = 0; /* ADD TABLE */
- signal->theData[3] = addfragptr.p->tabId;
- signal->theData[4] = addfragptr.p->noOfAttr;
- signal->theData[5] =
+ tupFragReq->userPtr = addfragptr.i;
+ tupFragReq->userRef = cownref;
+ tupFragReq->reqInfo = 0; /* ADD TABLE */
+ tupFragReq->tableId = addfragptr.p->tabId;
+ tupFragReq->noOfAttr = addfragptr.p->noOfAttr;
+ tupFragReq->fragId =
addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
? addfragptr.p->fragid1 : addfragptr.p->fragid2;
- signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
- signal->theData[7] = addfragptr.p->noOfNull;
- signal->theData[8] = addfragptr.p->schemaVer;
- signal->theData[9] = addfragptr.p->noOfKeyAttr;
- signal->theData[10] = addfragptr.p->noOfNewAttr;
- signal->theData[11] = addfragptr.p->checksumIndicator;
- signal->theData[12] = addfragptr.p->noOfAttributeGroups;
- signal->theData[13] = addfragptr.p->GCPIndicator;
+ tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow;
+ tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh;
+ tupFragReq->minRowsLow = addfragptr.p->minRowsLow;
+ tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh;
+ tupFragReq->noOfNullAttr = addfragptr.p->noOfNull;
+ tupFragReq->schemaVersion = addfragptr.p->schemaVer;
+ tupFragReq->noOfKeyAttr = addfragptr.p->noOfKeyAttr;
+ tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr;
+ tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
+ tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
+ tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups;
+ tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
signal, TupFragReq::SignalLength, JBB);
return;
}
if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
jam();
- signal->theData[0] = addfragptr.i;
- signal->theData[1] = cownref;
- signal->theData[2] = 0; /* ADD TABLE */
- signal->theData[3] = addfragptr.p->tabId;
- signal->theData[4] = 1; /* ordered index: one array attr */
- signal->theData[5] =
+ tupFragReq->userPtr = addfragptr.i;
+ tupFragReq->userRef = cownref;
+ tupFragReq->reqInfo = 0; /* ADD TABLE */
+ tupFragReq->tableId = addfragptr.p->tabId;
+ tupFragReq->noOfAttr = 1; /* ordered index: one array attr */
+ tupFragReq->fragId =
addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP
? addfragptr.p->fragid1 : addfragptr.p->fragid2;
- signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1;
- signal->theData[7] = 0; /* ordered index: no nullable */
- signal->theData[8] = addfragptr.p->schemaVer;
- signal->theData[9] = 1; /* ordered index: one key */
- signal->theData[10] = addfragptr.p->noOfNewAttr;
- signal->theData[11] = addfragptr.p->checksumIndicator;
- signal->theData[12] = addfragptr.p->noOfAttributeGroups;
- signal->theData[13] = addfragptr.p->GCPIndicator;
+ tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow;
+ tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh;
+ tupFragReq->minRowsLow = addfragptr.p->minRowsLow;
+ tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh;
+ tupFragReq->noOfNullAttr = 0; /* ordered index: no nullable */
+ tupFragReq->schemaVersion = addfragptr.p->schemaVer;
+ tupFragReq->noOfKeyAttr = 1; /* ordered index: one key */
+ tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr;
+ tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets;
+ tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator;
+ tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups;
+ tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ,
signal, TupFragReq::SignalLength, JBB);
return;
@@ -1580,28 +1597,35 @@ void Dblqh::abortAddFragOps(Signal* signal)
{
fragptr.i = addfragptr.p->fragmentPtr;
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- signal->theData[0] = (Uint32)-1;
if (addfragptr.p->tup1Connectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tup1Connectptr;
+ TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
+ tupFragReq->userPtr = (Uint32)-1;
+ tupFragReq->userRef = addfragptr.p->tup1Connectptr;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
addfragptr.p->tup1Connectptr = RNIL;
}
if (addfragptr.p->tup2Connectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tup2Connectptr;
+ TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend();
+ tupFragReq->userPtr = (Uint32)-1;
+ tupFragReq->userRef = addfragptr.p->tup2Connectptr;
sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
addfragptr.p->tup2Connectptr = RNIL;
}
if (addfragptr.p->tux1Connectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tux1Connectptr;
+ TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend();
+ tuxFragReq->userPtr = (Uint32)-1;
+ tuxFragReq->userRef = addfragptr.p->tux1Connectptr;
sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
addfragptr.p->tux1Connectptr = RNIL;
}
if (addfragptr.p->tux2Connectptr != RNIL) {
jam();
- signal->theData[1] = addfragptr.p->tux2Connectptr;
+ TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend();
+ tuxFragReq->userPtr = (Uint32)-1;
+ tuxFragReq->userRef = addfragptr.p->tux2Connectptr;
sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
addfragptr.p->tux2Connectptr = RNIL;
}
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index cf3c6056d65..41194fba82c 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -496,7 +496,8 @@ struct DiskBufferSegmentInfo {
typedef Ptr<DiskBufferSegmentInfo> DiskBufferSegmentInfoPtr;
struct Fragoperrec {
- bool definingFragment;
+ Uint64 minRows;
+ Uint64 maxRows;
Uint32 nextFragoprec;
Uint32 lqhPtrFrag;
Uint32 fragidFrag;
@@ -509,6 +510,7 @@ struct Fragoperrec {
Uint32 charsetIndex;
BlockReference lqhBlockrefFrag;
bool inUse;
+ bool definingFragment;
};
typedef Ptr<Fragoperrec> FragoperrecPtr;
@@ -560,6 +562,7 @@ struct Fragrecord {
Uint32 currentPageRange;
Uint32 rootPageRange;
Uint32 noOfPages;
+ Uint32 noOfPagesToGrow;
Uint32 emptyPrimPage;
Uint32 firstusedOprec;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index bacba2a880c..12cd61a17a6 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -41,7 +41,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
{
ljamEntry();
- if (signal->theData[0] == (Uint32)-1) {
+ TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr();
+ if (tupFragReq->userPtr == (Uint32)-1) {
ljam();
abortAddFragOp(signal);
return;
@@ -51,30 +52,34 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
FragrecordPtr regFragPtr;
TablerecPtr regTabPtr;
- Uint32 userptr = signal->theData[0];
- Uint32 userblockref = signal->theData[1];
- Uint32 reqinfo = signal->theData[2];
- regTabPtr.i = signal->theData[3];
- Uint32 noOfAttributes = signal->theData[4];
- Uint32 fragId = signal->theData[5];
- Uint32 noOfNullAttr = signal->theData[7];
- /* Uint32 schemaVersion = signal->theData[8];*/
- Uint32 noOfKeyAttr = signal->theData[9];
+ Uint32 userptr = tupFragReq->userPtr;
+ Uint32 userblockref = tupFragReq->userRef;
+ Uint32 reqinfo = tupFragReq->reqInfo;
+ regTabPtr.i = tupFragReq->tableId;
+ Uint32 noOfAttributes = tupFragReq->noOfAttr;
+ Uint32 fragId = tupFragReq->fragId;
+ Uint32 noOfNullAttr = tupFragReq->noOfNullAttr;
+ /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/
+ Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr;
- Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF);
- /* DICT sends number of character sets in upper half */
- Uint32 noOfCharsets = (signal->theData[10] >> 16);
+ Uint32 noOfNewAttr = tupFragReq->noOfNewAttr;
+ Uint32 noOfCharsets = tupFragReq->noOfCharsets;
- Uint32 checksumIndicator = signal->theData[11];
- Uint32 noOfAttributeGroups = signal->theData[12];
- Uint32 globalCheckpointIdIndicator = signal->theData[13];
+ Uint32 checksumIndicator = tupFragReq->checksumIndicator;
+ Uint32 noOfAttributeGroups = tupFragReq->noOfAttributeGroups;
+ Uint32 globalCheckpointIdIndicator = tupFragReq->globalCheckpointIdIndicator;
+
+ Uint64 maxRows =
+ (((Uint64)tupFragReq->maxRowsHigh) << 32) + tupFragReq->maxRowsLow;
+ Uint64 minRows =
+ (((Uint64)tupFragReq->minRowsHigh) << 32) + tupFragReq->minRowsLow;
#ifndef VM_TRACE
// config mismatch - do not crash if release compiled
if (regTabPtr.i >= cnoOfTablerec) {
ljam();
- signal->theData[0] = userptr;
- signal->theData[1] = 800;
+ tupFragReq->userPtr = userptr;
+ tupFragReq->userRef = 800;
sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
return;
}
@@ -83,8 +88,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
if (cfirstfreeFragopr == RNIL) {
ljam();
- signal->theData[0] = userptr;
- signal->theData[1] = ZNOFREE_FRAGOP_ERROR;
+ tupFragReq->userPtr = userptr;
+ tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR;
sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
return;
}//if
@@ -100,6 +105,9 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
fragOperPtr.p->noOfNewAttrCount = noOfNewAttr;
fragOperPtr.p->charsetIndex = 0;
fragOperPtr.p->currNullBit = 0;
+ // remove in 5.1: in 5.0 each table fragment maps to two TUP fragments, hence the halving
+ fragOperPtr.p->minRows = (minRows + 1)/2;
+ fragOperPtr.p->maxRows = (maxRows + 1)/2;
ndbrequire(reqinfo == ZADDFRAG);
@@ -141,16 +149,6 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regFragPtr.p->fragmentId = fragId;
regFragPtr.p->checkpointVersion = RNIL;
- Uint32 noAllocatedPages = 2;
- noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
-
- if (noAllocatedPages == 0) {
- ljam();
- terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
- fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
- return;
- }//if
-
if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {
ljam();
@@ -407,6 +405,27 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
CLEAR_ERROR_INSERT_VALUE;
return;
}
+
+ if (lastAttr)
+ {
+ ljam();
+ Uint32 noRowsPerPage = ZWORDS_ON_PAGE/regTabPtr.p->tupheadsize;
+ Uint32 noAllocatedPages =
+ (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage;
+ if (fragOperPtr.p->minRows == 0)
+ noAllocatedPages = 2;
+ else if (noAllocatedPages == 0)
+ noAllocatedPages = 2;
+ noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages);
+
+ if (noAllocatedPages == 0) {
+ ljam();
+ terrorCode = ZNO_PAGES_ALLOCATED_ERROR;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ return;
+ }//if
+ }
+
/* **************************************************************** */
/* ************** TUP_ADD_ATTCONF ****************** */
/* **************************************************************** */
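With this change TUP defers the initial page allocation to the last TUP_ADD_ATTRREQ, when the tuple header size is known, and sizes it from the fragment's minRows with a floor of two pages. A rough stand-alone model of that calculation (the ZWORDS_ON_PAGE and tuple-size values below are assumptions for illustration):

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t Uint32;
    typedef uint64_t Uint64;

    static const Uint32 ZWORDS_ON_PAGE = 8192;   // assumed page payload in 32-bit words

    // Pages to pre-allocate for a fragment expected to hold at least minRows rows.
    static Uint32 pages_to_preallocate(Uint64 minRows, Uint32 tupheadsize)
    {
      Uint32 noRowsPerPage = ZWORDS_ON_PAGE / tupheadsize;
      Uint32 pages = (Uint32)((minRows + noRowsPerPage - 1) / noRowsPerPage);
      if (minRows == 0 || pages == 0)
        pages = 2;                               // keep the old default of two pages
      return pages;
    }

    int main()
    {
      printf("%u\n", pages_to_preallocate(100000, 32));  // e.g. 32-word tuples -> 391 pages
      return 0;
    }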
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index 1f674876642..acdb73704cb 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -332,6 +332,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr)
regFragPtr->rootPageRange = RNIL;
regFragPtr->currentPageRange = RNIL;
regFragPtr->noOfPages = 0;
+ regFragPtr->noOfPagesToGrow = 2;
regFragPtr->nextStartRange = 0;
}//initFragRange()
@@ -393,9 +394,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* const regFragPtr, Uint32 tafpNoAllocReq
void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
{
- Uint32 noAllocPages = regFragPtr->noOfPages >> 3; // 12.5%
- noAllocPages += regFragPtr->noOfPages >> 4; // 6.25%
+ Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5%
+ noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25%
noAllocPages += 2;
+ regFragPtr->noOfPagesToGrow += noAllocPages;
/* -----------------------------------------------------------------*/
// We will grow by 18.75% plus two more additional pages to grow
// a little bit quicker in the beginning.
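allocMoreFragPages now grows relative to the separate noOfPagesToGrow counter rather than the pages actually allocated, so a fragment that was pre-allocated large does not keep growing by roughly 19% of its full size on every extension. A toy model of the resulting growth sequence, starting from the counter value of 2 set in initFragRange():

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t Uint32;

    int main()
    {
      Uint32 noOfPagesToGrow = 2;            // as set by initFragRange()
      for (int step = 0; step < 5; step++)
      {
        // 12.5% + 6.25% + 2 pages, mirroring allocMoreFragPages()
        Uint32 noAllocPages = (noOfPagesToGrow >> 3) + (noOfPagesToGrow >> 4) + 2;
        noOfPagesToGrow += noAllocPages;
        printf("step %d: allocate %u pages, grow counter now %u\n",
               step, noAllocPages, noOfPagesToGrow);
      }
      return 0;
    }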
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index b9466ed1173..69c0286a1de 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -2380,14 +2380,20 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted)
event.Event = BackupEvent::BackupCompleted;
event.Completed.BackupId = rep->backupId;
- event.Completed.NoOfBytes = rep->noOfBytes;
+ event.Completed.NoOfBytes = rep->noOfBytesLow;
event.Completed.NoOfLogBytes = rep->noOfLogBytes;
- event.Completed.NoOfRecords = rep->noOfRecords;
+ event.Completed.NoOfRecords = rep->noOfRecordsLow;
event.Completed.NoOfLogRecords = rep->noOfLogRecords;
event.Completed.stopGCP = rep->stopGCP;
event.Completed.startGCP = rep->startGCP;
event.Nodes = rep->nodes;
+ if (signal->header.theLength >= BackupCompleteRep::SignalLength)
+ {
+ event.Completed.NoOfBytes += ((Uint64)rep->noOfBytesHigh) << 32;
+ event.Completed.NoOfRecords += ((Uint64)rep->noOfRecordsHigh) << 32;
+ }
+
backupId = rep->backupId;
return 0;
}
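The management server only reads the new high words when the incoming signal is long enough, so older data nodes that still send the short BACKUP_COMPLETE_REP remain compatible. A sketch of that length-gated decode (the struct layout and signal lengths below are illustrative, not the real signal definition):

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t Uint32;
    typedef uint64_t Uint64;

    // Hypothetical layout: old senders stop after noOfRecordsLow,
    // new senders append the two high words.
    struct BackupCompleteRep {
      enum { OldSignalLength = 2, SignalLength = 4 };
      Uint32 noOfBytesLow;
      Uint32 noOfRecordsLow;
      Uint32 noOfBytesHigh;
      Uint32 noOfRecordsHigh;
    };

    static Uint64 total_records(const BackupCompleteRep *rep, Uint32 sigLen)
    {
      Uint64 records = rep->noOfRecordsLow;
      if (sigLen >= BackupCompleteRep::SignalLength)   // new-style sender
        records += ((Uint64)rep->noOfRecordsHigh) << 32;
      return records;
    }

    int main()
    {
      BackupCompleteRep rep = { 0, 7, 0, 1 };
      printf("old sender: %llu\n",
             (unsigned long long)total_records(&rep, BackupCompleteRep::OldSignalLength));
      printf("new sender: %llu\n",
             (unsigned long long)total_records(&rep, BackupCompleteRep::SignalLength));
      return 0;
    }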
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index 7811cf0e5d1..187f225470a 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -323,9 +323,9 @@ public:
Uint32 ErrorCode;
} FailedToStart ;
struct {
+ Uint64 NoOfBytes;
+ Uint64 NoOfRecords;
Uint32 BackupId;
- Uint32 NoOfBytes;
- Uint32 NoOfRecords;
Uint32 NoOfLogBytes;
Uint32 NoOfLogRecords;
Uint32 startGCP;
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
index a342a5d5926..a0a3dd431b8 100644
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/ndb/src/ndbapi/NdbDictionary.cpp
@@ -385,6 +385,30 @@ NdbDictionary::Table::getNoOfPrimaryKeys() const {
return m_impl.m_noOfKeys;
}
+void
+NdbDictionary::Table::setMaxRows(Uint64 maxRows)
+{
+ m_impl.m_max_rows = maxRows;
+}
+
+Uint64
+NdbDictionary::Table::getMaxRows() const
+{
+ return m_impl.m_max_rows;
+}
+
+void
+NdbDictionary::Table::setMinRows(Uint64 minRows)
+{
+ m_impl.m_min_rows = minRows;
+}
+
+Uint64
+NdbDictionary::Table::getMinRows() const
+{
+ return m_impl.m_min_rows;
+}
+
const char*
NdbDictionary::Table::getPrimaryKey(int no) const {
int count = 0;
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index bd50440b3c0..ce348b616c9 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -319,6 +319,8 @@ NdbTableImpl::init(){
m_noOfDistributionKeys= 0;
m_noOfBlobs= 0;
m_replicaCount= 0;
+ m_min_rows = 0;
+ m_max_rows = 0;
}
bool
@@ -416,6 +418,9 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_version = org.m_version;
m_status = org.m_status;
+
+ m_max_rows = org.m_max_rows;
+ m_min_rows = org.m_min_rows;
}
void NdbTableImpl::setName(const char * name)
@@ -1302,6 +1307,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
fragmentTypeMapping,
(Uint32)NdbDictionary::Object::FragUndefined);
+ Uint64 max_rows = ((Uint64)tableDesc.MaxRowsHigh) << 32;
+ max_rows += tableDesc.MaxRowsLow;
+ impl->m_max_rows = max_rows;
+ Uint64 min_rows = ((Uint64)tableDesc.MinRowsHigh) << 32;
+ min_rows += tableDesc.MinRowsLow;
+ impl->m_min_rows = min_rows;
impl->m_logging = tableDesc.TableLoggedFlag;
impl->m_kvalue = tableDesc.TableKValue;
impl->m_minLoadFactor = tableDesc.MinLoadFactor;
@@ -1630,7 +1641,16 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
tmpTab.MaxLoadFactor = impl.m_maxLoadFactor;
tmpTab.TableType = DictTabInfo::UserTable;
tmpTab.NoOfAttributes = sz;
+ tmpTab.MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32);
+ tmpTab.MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
+ tmpTab.MinRowsHigh = (Uint32)(impl.m_min_rows >> 32);
+ tmpTab.MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF);
+ Uint64 maxRows =
+ (((Uint64)tmpTab.MaxRowsHigh) << 32) + tmpTab.MaxRowsLow;
+ Uint64 minRows =
+ (((Uint64)tmpTab.MinRowsHigh) << 32) + tmpTab.MinRowsLow;
+
tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType,
fragmentTypeMapping,
DictTabInfo::AllNodesSmallTable);
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index bc9894497f8..dfccf120228 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -130,6 +130,9 @@ public:
Uint32 m_hashpointerValue;
Vector<Uint16> m_fragments;
+ Uint64 m_max_rows;
+ Uint64 m_min_rows;
+
bool m_logging;
int m_kvalue;
int m_minLoadFactor;
diff --git a/ndb/tools/restore/Restore.cpp b/ndb/tools/restore/Restore.cpp
index 6ac06f8a6f8..a808a48b558 100644
--- a/ndb/tools/restore/Restore.cpp
+++ b/ndb/tools/restore/Restore.cpp
@@ -80,7 +80,12 @@ RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) {
RestoreMetaData::~RestoreMetaData(){
for(Uint32 i= 0; i < allTables.size(); i++)
- delete allTables[i];
+ {
+ TableS *table = allTables[i];
+ for(Uint32 j= 0; j < table->m_fragmentInfo.size(); j++)
+ delete table->m_fragmentInfo[j];
+ delete table;
+ }
allTables.clear();
}
@@ -111,6 +116,9 @@ RestoreMetaData::loadContent()
}
if(!readGCPEntry())
return 0;
+
+ if(!readFragmentInfo())
+ return 0;
return 1;
}
@@ -192,6 +200,52 @@ RestoreMetaData::readGCPEntry() {
return true;
}
+bool
+RestoreMetaData::readFragmentInfo()
+{
+ BackupFormat::CtlFile::FragmentInfo fragInfo;
+ TableS * table = 0;
+ Uint32 tableId = RNIL;
+
+ while (buffer_read(&fragInfo, 4, 2) == 2)
+ {
+ fragInfo.SectionType = ntohl(fragInfo.SectionType);
+ fragInfo.SectionLength = ntohl(fragInfo.SectionLength);
+
+ if (fragInfo.SectionType != BackupFormat::FRAGMENT_INFO)
+ {
+ err << "readFragmentInfo invalid section type: " <<
+ fragInfo.SectionType << endl;
+ return false;
+ }
+
+ if (buffer_read(&fragInfo.TableId, (fragInfo.SectionLength-2)*4, 1) != 1)
+ {
+ err << "readFragmentInfo invalid section length: " <<
+ fragInfo.SectionLength << endl;
+ return false;
+ }
+
+ fragInfo.TableId = ntohl(fragInfo.TableId);
+ if (fragInfo.TableId != tableId)
+ {
+ tableId = fragInfo.TableId;
+ table = getTable(tableId);
+ }
+
+ FragmentInfo * tmp = new FragmentInfo;
+ tmp->fragmentNo = ntohl(fragInfo.FragmentNo);
+ tmp->noOfRecords = ntohl(fragInfo.NoOfRecordsLow) +
+ (((Uint64)ntohl(fragInfo.NoOfRecordsHigh)) << 32);
+ tmp->filePosLow = ntohl(fragInfo.FilePosLow);
+ tmp->filePosHigh = ntohl(fragInfo.FilePosHigh);
+
+ table->m_fragmentInfo.push_back(tmp);
+ table->m_noOfRecords += tmp->noOfRecords;
+ }
+ return true;
+}
+
TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
: m_dictTable(tableImpl)
{
@@ -199,6 +253,7 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
m_noOfNullable = m_nullBitmaskSize = 0;
m_auto_val_id= ~(Uint32)0;
m_max_auto_val= 0;
+ m_noOfRecords= 0;
backupVersion = version;
for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
@@ -937,4 +992,5 @@ operator<<(NdbOut& ndbout, const TableS & table){
template class Vector<TableS*>;
template class Vector<AttributeS*>;
template class Vector<AttributeDesc*>;
+template class Vector<FragmentInfo*>;
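readFragmentInfo() walks the new FRAGMENT_INFO sections of the backup control file: each section starts with a type and length word, everything is stored in network byte order, and the 64-bit record count is rebuilt from its two halves. A self-contained sketch of decoding one such section from fabricated bytes (POSIX ntohl/htonl assumed):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <arpa/inet.h>   // ntohl/htonl

    typedef uint32_t Uint32;
    typedef uint64_t Uint64;

    // Mirrors BackupFormat::CtlFile::FragmentInfo from the patch.
    struct FragmentInfo {
      Uint32 SectionType, SectionLength, TableId, FragmentNo;
      Uint32 NoOfRecordsLow, NoOfRecordsHigh, FilePosLow, FilePosHigh;
    };

    int main()
    {
      // A fabricated section in network byte order, as it would sit in the .ctl file.
      Uint32 raw[8] = { htonl(7), htonl(8), htonl(3), htonl(0),
                        htonl(42), htonl(1), htonl(0), htonl(0) };

      FragmentInfo info;
      memcpy(&info, raw, sizeof(info));

      Uint64 records = ntohl(info.NoOfRecordsLow) +
                       (((Uint64)ntohl(info.NoOfRecordsHigh)) << 32);
      printf("table %u fragment %u: %llu records\n",
             ntohl(info.TableId), ntohl(info.FragmentNo),
             (unsigned long long)records);
      return 0;
    }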
diff --git a/ndb/tools/restore/Restore.hpp b/ndb/tools/restore/Restore.hpp
index 85793baf9df..cf8feb7125c 100644
--- a/ndb/tools/restore/Restore.hpp
+++ b/ndb/tools/restore/Restore.hpp
@@ -114,6 +114,14 @@ public:
AttributeData * getData(int i) const;
}; // class TupleS
+struct FragmentInfo
+{
+ Uint32 fragmentNo;
+ Uint64 noOfRecords;
+ Uint32 filePosLow;
+ Uint32 filePosHigh;
+};
+
class TableS {
friend class TupleS;
@@ -136,6 +144,9 @@ class TableS {
int pos;
+ Uint64 m_noOfRecords;
+ Vector<FragmentInfo *> m_fragmentInfo;
+
void createAttr(NdbDictionary::Column *column);
public:
@@ -146,6 +157,9 @@ public:
Uint32 getTableId() const {
return m_dictTable->getTableId();
}
+ Uint32 getNoOfRecords() const {
+ return m_noOfRecords;
+ }
/*
void setMysqlTableName(char * tableName) {
strpcpy(mysqlTableName, tableName);
@@ -274,6 +288,7 @@ class RestoreMetaData : public BackupFile {
bool readMetaTableDesc();
bool readGCPEntry();
+ bool readFragmentInfo();
Uint32 readMetaTableList();
Uint32 m_startGCP;
diff --git a/ndb/tools/restore/consumer_restore.cpp b/ndb/tools/restore/consumer_restore.cpp
index d62ca3f610a..bff63c28716 100644
--- a/ndb/tools/restore/consumer_restore.cpp
+++ b/ndb/tools/restore/consumer_restore.cpp
@@ -193,6 +193,16 @@ BackupRestore::table(const TableS & table){
copy.setName(split[2].c_str());
+ /*
+ update min and max rows to reflect the size of the restored table,
+ so that memory is allocated properly in the ndb kernel
+ */
+ copy.setMinRows(table.getNoOfRecords());
+ if (table.getNoOfRecords() > copy.getMaxRows())
+ {
+ copy.setMaxRows(table.getNoOfRecords());
+ }
+
if (dict->createTable(copy) == -1)
{
err << "Create table " << table.getTableName() << " failed: "
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index 0f68b484f41..a339ebc5b8f 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -32,7 +32,7 @@ bin_SCRIPTS = @server_scripts@ \
mysqldumpslow \
mysql_explain_log \
mysql_tableinfo \
- mysql_upgrade \
+ mysql_upgrade_shell \
mysqld_multi \
mysql_create_system_tables
@@ -60,7 +60,7 @@ EXTRA_SCRIPTS = make_binary_distribution.sh \
mysql_explain_log.sh \
mysqld_multi.sh \
mysql_tableinfo.sh \
- mysql_upgrade.sh \
+ mysql_upgrade_shell.sh \
mysqld_safe.sh \
mysql_create_system_tables.sh
@@ -89,7 +89,7 @@ CLEANFILES = @server_scripts@ \
mysqldumpslow \
mysql_explain_log \
mysql_tableinfo \
- mysql_upgrade \
+ mysql_upgrade_shell \
mysqld_multi \
make_win_src_distribution \
mysql_create_system_tables
diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh
index 36c941ef6aa..c344cf3e93a 100644
--- a/scripts/make_binary_distribution.sh
+++ b/scripts/make_binary_distribution.sh
@@ -134,7 +134,7 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \
client/mysql$BS client/mysqlshow$BS client/mysqladmin$BS \
client/mysqldump$BS client/mysqlimport$BS \
client/mysqltest$BS client/mysqlcheck$BS \
- client/mysqlbinlog$BS \
+ client/mysqlbinlog$BS client/mysql_upgrade$BS \
tests/mysql_client_test$BS \
libmysqld/examples/mysql_client_test_embedded$BS \
libmysqld/examples/mysqltest_embedded$BS \
@@ -178,11 +178,21 @@ if [ $BASE_SYSTEM = "netware" ] ; then
fi
copyfileto $BASE/lib \
- libmysql/.libs/libmysqlclient.a libmysql/.libs/libmysqlclient.so* \
- libmysql/libmysqlclient.* libmysql_r/.libs/libmysqlclient_r.a \
- libmysql_r/.libs/libmysqlclient_r.so* libmysql_r/libmysqlclient_r.* \
+ libmysql/.libs/libmysqlclient.a \
+ libmysql/.libs/libmysqlclient.so* \
+ libmysql/.libs/libmysqlclient.sl* \
+ libmysql/.libs/libmysqlclient*.dylib \
+ libmysql/libmysqlclient.* \
+ libmysql_r/.libs/libmysqlclient_r.a \
+ libmysql_r/.libs/libmysqlclient_r.so* \
+ libmysql_r/.libs/libmysqlclient_r.sl* \
+ libmysql_r/.libs/libmysqlclient_r*.dylib \
+ libmysql_r/libmysqlclient_r.* \
+ libmysqld/.libs/libmysqld.a \
+ libmysqld/.libs/libmysqld.so* \
+ libmysqld/.libs/libmysqld.sl* \
+ libmysqld/.libs/libmysqld*.dylib \
mysys/libmysys.a strings/libmystrings.a dbug/libdbug.a \
- libmysqld/.libs/libmysqld.a libmysqld/.libs/libmysqld.so* \
libmysqld/libmysqld.a netware/libmysql.imp \
zlib/.libs/libz.a
diff --git a/scripts/mysql_upgrade.sh b/scripts/mysql_upgrade_shell.sh
index c9f375b6c5b..c9f375b6c5b 100644
--- a/scripts/mysql_upgrade.sh
+++ b/scripts/mysql_upgrade_shell.sh
diff --git a/server-tools/instance-manager/instance_options.cc b/server-tools/instance-manager/instance_options.cc
index 9389694822a..72621ed1662 100644
--- a/server-tools/instance-manager/instance_options.cc
+++ b/server-tools/instance-manager/instance_options.cc
@@ -391,8 +391,13 @@ int Instance_options::complete_initialization(const char *default_path,
const char *tmp;
char *end;
- if (!mysqld_path && !(mysqld_path= strdup_root(&alloc, default_path)))
- goto err;
+ if (!mysqld_path)
+ {
+ // Need one extra byte, as convert_dirname() adds a slash at the end.
+ if (!(mysqld_path= alloc_root(&alloc, strlen(default_path) + 2)))
+ goto err;
+ strcpy((char *)mysqld_path, default_path);
+ }
// it's safe to cast this to char* since this is a buffer we are allocating
end= convert_dirname((char*)mysqld_path, mysqld_path, NullS);
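convert_dirname() may append a trailing slash before the terminating NUL, which is why the buffer is sized strlen(default_path) + 2 rather than using strdup_root(). A toy stand-in showing why both extra bytes are needed (append_slash below is not the real mysys function):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Toy stand-in: ensure the path ends with exactly one '/'.
    // One extra byte for the possible added '/', one for the terminating NUL.
    static char *append_slash(char *to, const char *from)
    {
      size_t len = strlen(from);
      memcpy(to, from, len);
      if (len == 0 || to[len - 1] != '/')
        to[len++] = '/';
      to[len] = '\0';
      return to + len;
    }

    int main()
    {
      const char *default_path = "/usr/sbin/mysqld-dir";   // hypothetical path
      char *buf = (char *)malloc(strlen(default_path) + 2); // room for '/' and NUL
      append_slash(buf, default_path);
      puts(buf);
      free(buf);
      return 0;
    }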
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index c6d5c77803b..02bcde43f11 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -632,8 +632,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table,
DBUG_PRINT("info", ("Length %d \n", table->s->connect_string.length));
DBUG_PRINT("info", ("String %.*s \n", table->s->connect_string.length,
table->s->connect_string.str));
- share->scheme= my_strdup_with_length((const byte*)table->s->
- connect_string.str,
+ share->scheme= my_strdup_with_length(table->s->connect_string.str,
table->s->connect_string.length,
MYF(0));
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 46ab5b88624..a955bc44721 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4116,7 +4116,11 @@ static int create_ndb_column(NDBCOL &col,
static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
{
- if (form->s->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */
+ ha_rows max_rows= form->s->max_rows;
+ ha_rows min_rows= form->s->min_rows;
+ if (max_rows < min_rows)
+ max_rows= min_rows;
+ if (max_rows == (ha_rows)0) /* default setting, don't set fragmentation */
return;
/**
* get the number of fragments right
@@ -4134,7 +4138,6 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
acc_row_size+= 4 + /*safety margin*/ 4;
#endif
ulonglong acc_fragment_size= 512*1024*1024;
- ulonglong max_rows= form->s->max_rows;
#if MYSQL_VERSION_ID >= 50100
no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1;
#else
@@ -4158,6 +4161,8 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
ftype= NDBTAB::FragAllSmall;
tab.setFragmentType(ftype);
}
+ tab.setMaxRows(max_rows);
+ tab.setMinRows(min_rows);
}
int ha_ndbcluster::create(const char *name,
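ndb_set_fragmentation() now forwards the table's MAX_ROWS/MIN_ROWS hints through the new NdbDictionary::Table::setMaxRows()/setMinRows() accessors so the kernel can size its memory and fragment count. A hedged NDB API usage sketch (table and column names are invented, error handling omitted):

    #include <NdbApi.hpp>

    // Sketch only: create a table with row-count hints so the kernel can
    // pre-allocate memory and pick a sensible fragment count.
    int create_hinted_table(NdbDictionary::Dictionary *dict)
    {
      NdbDictionary::Table tab("t_hinted");          // hypothetical table name

      NdbDictionary::Column col("id");
      col.setType(NdbDictionary::Column::Unsigned);
      col.setPrimaryKey(true);
      tab.addColumn(col);

      tab.setMinRows(1000000);                       // expected lower bound on rows
      tab.setMaxRows(5000000);                       // expected upper bound on rows

      return dict->createTable(tab);                 // 0 on success, -1 on error
    }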
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index a51ebd39147..7a35dedc08a 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -1667,13 +1667,13 @@ String *Item_func_database::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
THD *thd= current_thd;
- if (!thd->db)
+ if (thd->db == NULL)
{
null_value= 1;
return 0;
}
else
- str->copy((const char*) thd->db,(uint) strlen(thd->db),system_charset_info);
+ str->copy(thd->db, thd->db_length, system_charset_info);
return str;
}
diff --git a/sql/lock.cc b/sql/lock.cc
index 71384fe7fc6..97a080c5634 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -905,7 +905,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list)
if (table_list->table)
{
hash_delete(&open_cache, (byte*) table_list->table);
- (void) pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
}
}
@@ -997,9 +997,9 @@ end:
(default 0, which will unlock all tables)
NOTES
- One must have a lock on LOCK_open when calling this
- This function will send a COND_refresh signal to inform other threads
- that the name locks are removed
+ One must have a lock on LOCK_open when calling this.
+ This function will broadcast refresh signals to inform other threads
+ that the name locks are removed.
RETURN
0 ok
@@ -1013,7 +1013,7 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list,
table != last_table;
table= table->next_local)
unlock_table_name(thd,table);
- pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
}
@@ -1304,3 +1304,37 @@ bool make_global_read_lock_block_commit(THD *thd)
}
+/*
+ Broadcast COND_refresh and COND_global_read_lock.
+
+ SYNOPSIS
+ broadcast_refresh()
+ void No parameters.
+
+ DESCRIPTION
+ Due to a bug in a threading library it could happen that a signal
+ did not reach its target. A condition for this was that the same
+ condition variable was used with different mutexes in
+ pthread_cond_wait(). Some time ago we changed LOCK_open to
+ LOCK_global_read_lock in global read lock handling. So COND_refresh
+ was used with LOCK_open and LOCK_global_read_lock.
+
+ We did now also change from COND_refresh to COND_global_read_lock
+ in global read lock handling. But now it is necessary to signal
+ both conditions at the same time.
+
+ NOTE
+ When signalling COND_global_read_lock within the global read lock
+ handling, it is not necessary to also signal COND_refresh.
+
+ RETURN
+ void
+*/
+
+void broadcast_refresh(void)
+{
+ VOID(pthread_cond_broadcast(&COND_refresh));
+ VOID(pthread_cond_broadcast(&COND_global_read_lock));
+}
+
+
diff --git a/sql/log.cc b/sql/log.cc
index ba02c9ba082..cfb90d398e6 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -36,6 +36,8 @@
MYSQL_LOG mysql_log, mysql_slow_log, mysql_bin_log;
ulong sync_binlog_counter= 0;
+static Muted_query_log_event invisible_commit;
+
static bool test_if_number(const char *str,
long *res, bool allow_wildcards);
static bool binlog_init();
@@ -94,7 +96,9 @@ static int binlog_end_trans(THD *thd, IO_CACHE *trans_log, Log_event *end_ev)
{
int error=0;
DBUG_ENTER("binlog_end_trans");
- if (end_ev)
+
+ /* NULL denotes ROLLBACK with nothing to replicate */
+ if (end_ev != NULL)
error= mysql_bin_log.write(thd, trans_log, end_ev);
statistic_increment(binlog_cache_use, &LOCK_status);
@@ -126,14 +130,19 @@ static int binlog_commit(THD *thd, bool all)
DBUG_ASSERT(mysql_bin_log.is_open() &&
(all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))));
- if (!my_b_tell(trans_log))
+ if (my_b_tell(trans_log) == 0)
{
// we're here because trans_log was flushed in MYSQL_LOG::log()
DBUG_RETURN(0);
}
- Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE);
- qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
- DBUG_RETURN(binlog_end_trans(thd, trans_log, &qev));
+ if (all)
+ {
+ Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE);
+ qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE)
+ DBUG_RETURN(binlog_end_trans(thd, trans_log, &qev));
+ }
+ else
+ DBUG_RETURN(binlog_end_trans(thd, trans_log, &invisible_commit));
}
static int binlog_rollback(THD *thd, bool all)
@@ -1813,6 +1822,9 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event)
DBUG_ENTER("MYSQL_LOG::write(THD *, IO_CACHE *, Log_event *)");
VOID(pthread_mutex_lock(&LOCK_log));
+ /* NULL would represent nothing to replicate after ROLLBACK */
+ DBUG_ASSERT(commit_event != NULL);
+
if (likely(is_open())) // Should always be true
{
uint length;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 266d6b064bd..b4707826205 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1229,6 +1229,18 @@ bool Query_log_event::write(IO_CACHE* file)
my_b_safe_write(file, (byte*) query, q_len)) ? 1 : 0;
}
+/*
+ Query_log_event::Query_log_event()
+
+ The simplest constructor that could possibly work. This is used for
+ creating static objects that have a special meaning and are invisible
+ to the log.
+*/
+Query_log_event::Query_log_event()
+ :Log_event(), data_buf(0)
+{
+}
+
/*
Query_log_event::Query_log_event()
@@ -1856,9 +1868,10 @@ end:
don't suffer from these assignments to 0 as DROP TEMPORARY
TABLE uses the db.table syntax.
*/
- thd->db= thd->catalog= 0; // prevent db from being freed
+ thd->catalog= 0;
+ thd->reset_db(NULL, 0); // prevent db from being freed
thd->query= 0; // just to be sure
- thd->query_length= thd->db_length =0;
+ thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
close_thread_tables(thd);
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
@@ -1876,6 +1889,21 @@ end:
/**************************************************************************
+ Muted_query_log_event methods
+**************************************************************************/
+
+#ifndef MYSQL_CLIENT
+/*
+ Muted_query_log_event::Muted_query_log_event()
+*/
+Muted_query_log_event::Muted_query_log_event()
+ :Query_log_event()
+{
+}
+#endif
+
+
+/**************************************************************************
Start_log_event_v3 methods
**************************************************************************/
@@ -2845,7 +2873,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
TABLE_LIST tables;
bzero((char*) &tables,sizeof(tables));
- tables.db = thd->db;
+ tables.db= thd->strmake(thd->db, thd->db_length);
tables.alias = tables.table_name = (char*) table_name;
tables.lock_type = TL_WRITE;
tables.updating= 1;
@@ -2940,7 +2968,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
ex.skip_lines = skip_lines;
List<Item> field_list;
thd->main_lex.select_lex.context.resolve_in_table_list_only(&tables);
- set_fields(thd->db, field_list, &thd->main_lex.select_lex.context);
+ set_fields(tables.db, field_list, &thd->main_lex.select_lex.context);
thd->variables.pseudo_thread_id= thread_id;
List<Item> set_fields;
if (net)
@@ -2987,11 +3015,12 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
error:
thd->net.vio = 0;
- char *save_db= thd->db;
+ const char *remember_db= thd->db;
VOID(pthread_mutex_lock(&LOCK_thread_count));
- thd->db= thd->catalog= 0;
+ thd->catalog= 0;
+ thd->reset_db(NULL, 0);
thd->query= 0;
- thd->query_length= thd->db_length= 0;
+ thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
close_thread_tables(thd);
if (thd->query_error)
@@ -3008,7 +3037,7 @@ error:
}
slave_print_error(rli,sql_errno,"\
Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- err, (char*)table_name, print_slave_db_safe(save_db));
+ err, (char*)table_name, print_slave_db_safe(remember_db));
free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
return 1;
}
@@ -3018,7 +3047,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
{
slave_print_error(rli,ER_UNKNOWN_ERROR, "\
Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- (char*)table_name, print_slave_db_safe(save_db));
+ (char*)table_name, print_slave_db_safe(remember_db));
return 1;
}
@@ -3094,7 +3123,7 @@ Rotate_log_event::Rotate_log_event(THD* thd_arg,
llstr(pos_arg, buff), flags));
#endif
if (flags & DUP_NAME)
- new_log_ident= my_strdup_with_length((const byte*) new_log_ident_arg,
+ new_log_ident= my_strdup_with_length(new_log_ident_arg,
ident_len, MYF(MY_WME));
DBUG_VOID_RETURN;
}
@@ -3118,7 +3147,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len,
(header_size+post_header_len));
ident_offset = post_header_len;
set_if_smaller(ident_len,FN_REFLEN-1);
- new_log_ident= my_strdup_with_length((byte*) buf + ident_offset,
+ new_log_ident= my_strdup_with_length(buf + ident_offset,
(uint) ident_len,
MYF(MY_WME));
DBUG_VOID_RETURN;
diff --git a/sql/log_event.h b/sql/log_event.h
index 0e1eb7cd13c..f1b441dedb1 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -783,6 +783,7 @@ public:
void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0);
#endif
+ Query_log_event();
Query_log_event(const char* buf, uint event_len,
const Format_description_log_event *description_event,
Log_event_type event_type);
@@ -806,6 +807,26 @@ public:
/* Writes derived event-specific part of post header. */
};
+
+/*****************************************************************************
+
+ Muted Query Log Event class
+
+ Pretends to log SQL queries, but doesn't actually do so.
+
+ ****************************************************************************/
+class Muted_query_log_event: public Query_log_event
+{
+public:
+#ifndef MYSQL_CLIENT
+ Muted_query_log_event();
+
+ bool write(IO_CACHE* file) { return(false); };
+ virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; }
+#endif
+};
+
+
#ifdef HAVE_REPLICATION
/*****************************************************************************
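Muted_query_log_event exists so the commit path can hand binlog_end_trans() a real event object whose write() succeeds without emitting anything, keeping statement-level commits invisible in the binary log. A stand-alone sketch of that no-op-write pattern with stand-in classes (false here means "no error", as in the server):

    #include <cstdio>

    struct FakeLog { int events_written; FakeLog() : events_written(0) {} };

    class Event {
    public:
      virtual ~Event() {}
      virtual bool write(FakeLog *log) { log->events_written++; return false; }
    };

    class MutedEvent : public Event {
    public:
      virtual bool write(FakeLog *) { return false; }  // pretend to write, do nothing
    };

    int main()
    {
      FakeLog log;
      Event commit;
      MutedEvent muted;
      commit.write(&log);   // counted in the log
      muted.write(&log);    // invisible to the log
      printf("events written: %d\n", log.events_written);
      return 0;
    }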
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 3bb371b6004..3c58f2cbc6b 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -1344,6 +1344,7 @@ void start_waiting_global_read_lock(THD *thd);
bool make_global_read_lock_block_commit(THD *thd);
bool set_protect_against_global_read_lock(void);
void unset_protect_against_global_read_lock(void);
+void broadcast_refresh(void);
/* Lock based on name */
int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list);
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 51317dec50d..1cb3878ac70 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -1133,7 +1133,7 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex,
uint new_length= (var ? var->value->str_value.length() : 0);
if (!old_value)
old_value= (char*) "";
- if (!(res= my_strdup_with_length((byte*)old_value, new_length, MYF(0))))
+ if (!(res= my_strdup_with_length(old_value, new_length, MYF(0))))
return 1;
/*
Replace the old value in such a way that the any thread using
diff --git a/sql/set_var.h b/sql/set_var.h
index 8e5a94b1e1b..b048428219d 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -935,7 +935,7 @@ public:
uint name_length_arg, gptr data_arg)
:name_length(name_length_arg), data(data_arg)
{
- name= my_strdup_with_length((byte*) name_arg, name_length, MYF(MY_WME));
+ name= my_strdup_with_length(name_arg, name_length, MYF(MY_WME));
links->push_back(this);
}
inline bool cmp(const char *name_cmp, uint length)
diff --git a/sql/slave.cc b/sql/slave.cc
index 2b31d722f26..4da447c4bc3 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1177,7 +1177,7 @@ bool net_request_file(NET* net, const char* fname)
}
-const char *rewrite_db(const char* db, uint32 *new_len)
+const char *rewrite_db(const char* db, uint *new_len)
{
if (replicate_rewrite_db.is_empty() || !db)
return db;
@@ -1581,9 +1581,8 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
// save old db in case we are creating in a different database
save_db = thd->db;
save_db_length= thd->db_length;
- thd->db = (char*)db;
- DBUG_ASSERT(thd->db != 0);
- thd->db_length= strlen(thd->db);
+ DBUG_ASSERT(db != 0);
+ thd->reset_db((char*)db, strlen(db));
mysql_parse(thd, thd->query, packet_len); // run create table
thd->db = save_db; // leave things the way the were before
thd->db_length= save_db_length;
@@ -3713,8 +3712,9 @@ err:
sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s",
IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff));
VOID(pthread_mutex_lock(&LOCK_thread_count));
- thd->query = thd->db = 0; // extra safety
- thd->query_length= thd->db_length= 0;
+ thd->query= 0; // extra safety
+ thd->query_length= 0;
+ thd->reset_db(NULL, 0);
VOID(pthread_mutex_unlock(&LOCK_thread_count));
if (mysql)
{
@@ -3932,8 +3932,10 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \
should already have done these assignments (each event which sets these
variables is supposed to set them to 0 before terminating)).
*/
- thd->query= thd->db= thd->catalog= 0;
- thd->query_length= thd->db_length= 0;
+ thd->catalog= 0;
+ thd->reset_db(NULL, 0);
+ thd->query= 0;
+ thd->query_length= 0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
thd->proc_info = "Waiting for slave mutex on exit";
pthread_mutex_lock(&rli->run_lock);
diff --git a/sql/slave.h b/sql/slave.h
index 040ce4eaf85..7f08105c0b9 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -526,10 +526,6 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t* start_lock,
MASTER_INFO* mi,
bool high_priority);
-/* If fd is -1, dump to NET */
-int mysql_table_dump(THD* thd, const char* db,
- const char* tbl_name, int fd = -1);
-
/* retrieve table from master and copy to slave*/
int fetch_master_table(THD* thd, const char* db_name, const char* table_name,
MASTER_INFO* mi, MYSQL* mysql, bool overwrite);
@@ -554,7 +550,7 @@ int add_table_rule(HASH* h, const char* table_spec);
int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec);
void init_table_rule_hash(HASH* h, bool* h_inited);
void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited);
-const char *rewrite_db(const char* db, uint32 *new_db_len);
+const char *rewrite_db(const char* db, uint *new_db_len);
const char *print_slave_db_safe(const char *db);
int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code);
void skip_load_data_infile(NET* net);
diff --git a/sql/sp.cc b/sql/sp.cc
index cae7a56fa57..553465ebff8 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -404,7 +404,8 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
{
LEX *old_lex= thd->lex, newlex;
String defstr;
- char olddb[128];
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
ulong old_sql_mode= thd->variables.sql_mode;
ha_rows old_select_limit= thd->variables.select_limit;
@@ -450,9 +451,7 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
goto end;
}
- dbchanged= FALSE;
- if ((ret= sp_use_new_db(thd, name->m_db.str, olddb, sizeof(olddb),
- 1, &dbchanged)))
+ if ((ret= sp_use_new_db(thd, name->m_db, &old_db, 1, &dbchanged)))
goto end;
lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length());
@@ -462,14 +461,14 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp,
{
sp_head *sp= newlex.sphead;
- if (dbchanged && (ret= mysql_change_db(thd, olddb, 1)))
+ if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1)))
goto end;
delete sp;
ret= SP_PARSE_ERROR;
}
else
{
- if (dbchanged && (ret= mysql_change_db(thd, olddb, 1)))
+ if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1)))
goto end;
*sphp= newlex.sphead;
(*sphp)->set_definer(&definer_user_name, &definer_host_name);
@@ -505,15 +504,14 @@ db_create_routine(THD *thd, int type, sp_head *sp)
int ret;
TABLE *table;
char definer[USER_HOST_BUFF_SIZE];
- char olddb[128];
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
DBUG_ENTER("db_create_routine");
DBUG_PRINT("enter", ("type: %d name: %.*s",type,sp->m_name.length,
sp->m_name.str));
- dbchanged= FALSE;
- if ((ret= sp_use_new_db(thd, sp->m_db.str, olddb, sizeof(olddb),
- 0, &dbchanged)))
+ if ((ret= sp_use_new_db(thd, sp->m_db, &old_db, 0, &dbchanged)))
{
ret= SP_NO_DB_ERROR;
goto done;
@@ -641,7 +639,7 @@ db_create_routine(THD *thd, int type, sp_head *sp)
done:
close_thread_tables(thd);
if (dbchanged)
- (void)mysql_change_db(thd, olddb, 1);
+ (void) mysql_change_db(thd, old_db.str, 1);
DBUG_RETURN(ret);
}
@@ -1814,49 +1812,76 @@ create_string(THD *thd, String *buf,
}
-//
-// Utilities...
-//
+
+/*
+ Change the current database if needed.
+
+ SYNOPSIS
+ sp_use_new_db()
+ thd thread handle
+
+ new_db new database name (a string and its length)
+
+ old_db [IN] str points to a buffer where to store the old
+ database, length contains the size of the buffer
+ [OUT] if old db was not NULL, its name is copied
+ to the buffer pointed at by str and length is updated
+ accordingly. Otherwise str[0] is set to '\0' and length
+ is set to 0. The out parameter should be used only if
+ the database name has been changed (see dbchangedp).
+
+ dbchangedp [OUT] is set to TRUE if the current database is changed,
+ FALSE otherwise. A database is not changed if the old
+ name is the same as the new one, both names are empty,
+ or an error has occurred.
+
+ RETURN VALUE
+ 0 success
+ 1 access denied or out of memory (the error message is
+ set in THD)
+*/
int
-sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddblen,
+sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db,
bool no_access_check, bool *dbchangedp)
{
- bool changeit;
+ int ret;
+ static char empty_c_string[1]= {0}; /* used for not defined db */
DBUG_ENTER("sp_use_new_db");
- DBUG_PRINT("enter", ("newdb: %s", newdb));
+ DBUG_PRINT("enter", ("newdb: %s", new_db.str));
- if (! newdb)
- newdb= (char *)"";
- if (thd->db && thd->db[0])
+ /*
+ Set new_db to an empty string if it's NULL, because mysql_change_db
+ requires a non-NULL argument.
+ new_db.str can be NULL only if we're restoring the old database after
+ execution of a stored procedure and there was no current database
+ selected. The stored procedure itself must always have its database
+ initialized.
+ */
+ if (new_db.str == NULL)
+ new_db.str= empty_c_string;
+
+ if (thd->db)
{
- if (my_strcasecmp(system_charset_info, thd->db, newdb) == 0)
- changeit= 0;
- else
- {
- changeit= 1;
- strnmov(olddb, thd->db, olddblen);
- }
+ old_db->length= (strmake(old_db->str, thd->db, old_db->length) -
+ old_db->str);
}
else
- { // thd->db empty
- if (newdb[0])
- changeit= 1;
- else
- changeit= 0;
- olddb[0] = '\0';
+ {
+ old_db->str[0]= '\0';
+ old_db->length= 0;
}
- if (!changeit)
+
+ /* Don't change the database if the new name is the same as the old one. */
+ if (my_strcasecmp(system_charset_info, old_db->str, new_db.str) == 0)
{
*dbchangedp= FALSE;
DBUG_RETURN(0);
}
- else
- {
- int ret= mysql_change_db(thd, newdb, no_access_check);
- if (! ret)
- *dbchangedp= TRUE;
- DBUG_RETURN(ret);
- }
+ ret= mysql_change_db(thd, new_db.str, no_access_check);
+
+ *dbchangedp= ret == 0;
+ DBUG_RETURN(ret);
}
+
diff --git a/sql/sp.h b/sql/sp.h
index 2587a9b115a..631b8a87aa2 100644
--- a/sql/sp.h
+++ b/sql/sp.h
@@ -104,15 +104,15 @@ extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first);
TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup);
void close_proc_table(THD *thd, Open_tables_state *backup);
-//
-// Utilities...
-//
-// Do a "use newdb". The current db is stored at olddb.
-// If newdb is the same as the current one, nothing is changed.
-// dbchangedp is set to true if the db was actually changed.
+/*
+ Do a "use new_db". The current db is stored at old_db. If new_db is the
+ same as the current one, nothing is changed. dbchangedp is set to true if
+ the db was actually changed.
+*/
+
int
-sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddbmax,
+sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db,
bool no_access_check, bool *dbchangedp);
#endif /* _SP_H_ */
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index ef2f895c8b2..b3b99557b63 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -376,24 +376,6 @@ sp_name::init_qname(THD *thd)
m_name.length, m_name.str);
}
-sp_name *
-sp_name_current_db_new(THD *thd, LEX_STRING name)
-{
- sp_name *qname;
-
- if (! thd->db)
- qname= new sp_name(name);
- else
- {
- LEX_STRING db;
-
- db.length= strlen(thd->db);
- db.str= thd->strmake(thd->db, db.length);
- qname= new sp_name(db, name);
- }
- qname->init_qname(thd);
- return qname;
-}
/*
Check that the name 'ident' is ok. It's assumed to be an 'ident'
@@ -504,27 +486,20 @@ sp_head::init_strings(THD *thd, LEX *lex, sp_name *name)
/* During parsing, we must use thd->mem_root */
MEM_ROOT *root= thd->mem_root;
+ DBUG_ASSERT(name);
+ /* Must be initialized in the parser */
+ DBUG_ASSERT(name->m_db.str && name->m_db.length);
+
/* We have to copy strings to get them into the right memroot */
- if (name)
- {
- m_db.length= name->m_db.length;
- if (name->m_db.length == 0)
- m_db.str= NULL;
- else
- m_db.str= strmake_root(root, name->m_db.str, name->m_db.length);
- m_name.length= name->m_name.length;
- m_name.str= strmake_root(root, name->m_name.str, name->m_name.length);
-
- if (name->m_qname.length == 0)
- name->init_qname(thd);
- m_qname.length= name->m_qname.length;
- m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length);
- }
- else if (thd->db)
- {
- m_db.length= thd->db_length;
- m_db.str= strmake_root(root, thd->db, m_db.length);
- }
+ m_db.length= name->m_db.length;
+ m_db.str= strmake_root(root, name->m_db.str, name->m_db.length);
+ m_name.length= name->m_name.length;
+ m_name.str= strmake_root(root, name->m_name.str, name->m_name.length);
+
+ if (name->m_qname.length == 0)
+ name->init_qname(thd);
+ m_qname.length= name->m_qname.length;
+ m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length);
if (m_param_begin && m_param_end)
{
@@ -565,7 +540,7 @@ create_typelib(MEM_ROOT *mem_root, create_field *field_def, List<String> *src)
result->name= "";
if (!(result->type_names=(const char **)
alloc_root(mem_root,(sizeof(char *)+sizeof(int))*(result->count+1))))
- return 0;
+ DBUG_RETURN(0);
result->type_lengths= (unsigned int *)(result->type_names + result->count+1);
List_iterator<String> it(*src);
String conv;
@@ -599,7 +574,7 @@ create_typelib(MEM_ROOT *mem_root, create_field *field_def, List<String> *src)
result->type_names[result->count]= 0;
result->type_lengths[result->count]= 0;
}
- return result;
+ DBUG_RETURN(result);
}
@@ -933,7 +908,8 @@ bool
sp_head::execute(THD *thd)
{
DBUG_ENTER("sp_head::execute");
- char olddb[128];
+ char old_db_buf[NAME_LEN+1];
+ LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) };
bool dbchanged;
sp_rcontext *ctx;
bool err_status= FALSE;
@@ -980,10 +956,8 @@ sp_head::execute(THD *thd)
m_first_instance->m_last_cached_sp == this) ||
(m_recursion_level + 1 == m_next_cached_sp->m_recursion_level));
- dbchanged= FALSE;
if (m_db.length &&
- (err_status= sp_use_new_db(thd, m_db.str, olddb, sizeof(olddb), 0,
- &dbchanged)))
+ (err_status= sp_use_new_db(thd, m_db, &old_db, 0, &dbchanged)))
goto done;
if ((ctx= thd->spcont))
@@ -1154,10 +1128,10 @@ sp_head::execute(THD *thd)
{
/*
No access check when changing back to where we came from.
- (It would generate an error from mysql_change_db() when olddb=="")
+ (It would generate an error from mysql_change_db() when old_db=="")
*/
if (! thd->killed)
- err_status|= mysql_change_db(thd, olddb, 1);
+ err_status|= mysql_change_db(thd, old_db.str, 1);
}
m_flags&= ~IS_INVOKED;
DBUG_PRINT("info",
@@ -1815,9 +1789,6 @@ sp_head::reset_thd_mem_root(THD *thd)
(ulong) &mem_root, (ulong) &thd->mem_root));
free_list= thd->free_list; // Keep the old list
thd->free_list= NULL; // Start a new one
- /* Copy the db, since substatements will point to it */
- m_thd_db= thd->db;
- thd->db= thd->strmake(thd->db, thd->db_length);
m_thd= thd;
DBUG_VOID_RETURN;
}
@@ -1833,7 +1804,6 @@ sp_head::restore_thd_mem_root(THD *thd)
DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx",
(ulong) &mem_root, (ulong) &thd->mem_root));
thd->free_list= flist; // Restore the old one
- thd->db= m_thd_db; // Restore the original db pointer
thd->mem_root= m_thd_root;
m_thd= NULL;
DBUG_VOID_RETURN;
diff --git a/sql/sp_head.h b/sql/sp_head.h
index d5f49d8a964..073cca2cd12 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -61,13 +61,6 @@ public:
*/
LEX_STRING m_sroutines_key;
- sp_name(LEX_STRING name)
- : m_name(name)
- {
- m_db.str= m_qname.str= m_sroutines_key.str= 0;
- m_db.length= m_qname.length= m_sroutines_key.length= 0;
- }
-
sp_name(LEX_STRING db, LEX_STRING name)
: m_db(db), m_name(name)
{
@@ -101,8 +94,6 @@ public:
{}
};
-sp_name *
-sp_name_current_db_new(THD *thd, LEX_STRING name);
bool
check_routine_name(LEX_STRING name);
@@ -355,7 +346,6 @@ private:
MEM_ROOT *m_thd_root; // Temp. store for thd's mem_root
THD *m_thd; // Set if we have reset mem_root
- char *m_thd_db; // Original thd->db pointer
sp_pcontext *m_pcont; // Parse context
List<LEX> m_lex; // Temp. store for the other lex
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 7d14e99fb77..5904e13d710 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -530,7 +530,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
if (found_old_table)
{
/* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
if (!lock_in_use)
VOID(pthread_mutex_unlock(&LOCK_open));
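
Every COND_refresh broadcast in this file is now funneled through the new broadcast_refresh() helper. Its definition is not shown in this hunk; it is assumed to live in sql/lock.cc (also touched by this change set) and to look roughly like the sketch below, waking both the refresh waiters and the global-read-lock waiter so that FLUSH TABLES WITH READ LOCK does not stall behind a just-closed table:

    /* Assumed helper in sql/lock.cc; name taken from the call sites,
       body is a sketch, not a quotation of the patch. */
    void broadcast_refresh(void)
    {
      VOID(pthread_cond_broadcast(&COND_refresh));
      VOID(pthread_cond_broadcast(&COND_global_read_lock));
    }
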
@@ -1035,7 +1035,7 @@ TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find)
}
*prev=0;
// Notify any 'refresh' threads
- pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
return start;
}
@@ -1577,7 +1577,7 @@ bool reopen_table(TABLE *table,bool locked)
if (table->triggers)
table->triggers->set_table(table);
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
error=0;
end:
@@ -1678,7 +1678,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh)
{
my_afree((gptr) tables);
}
- VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh
+ broadcast_refresh();
*prev=0;
DBUG_RETURN(error);
}
@@ -1715,7 +1715,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks,
}
}
if (found)
- VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh
+ broadcast_refresh();
DBUG_VOID_RETURN;
}
@@ -1807,7 +1807,7 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name)
}
*prev=0;
if (found)
- VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh
+ broadcast_refresh();
if (thd->locked_tables && thd->locked_tables->table_count == 0)
{
my_free((gptr) thd->locked_tables,MYF(0));
@@ -5249,7 +5249,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
Signal any thread waiting for tables to be freed to
reopen their tables
*/
- (void) pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
DBUG_PRINT("info", ("Waiting for refresh signal"));
if (!(flags & RTFC_CHECK_KILLED_FLAG) || !thd->killed)
{
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 2cfc9142453..1ba104df2a4 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1300,7 +1300,7 @@ public:
pthread_t real_id;
uint tmp_table, global_read_lock;
uint server_status,open_options,system_thread;
- uint32 db_length;
+ uint db_length;
uint select_number; //number of select (used for EXPLAIN)
/* variables.transaction_isolation is reset to this after each commit */
enum_tx_isolation session_tx_isolation;
@@ -1571,6 +1571,47 @@ public:
void restore_sub_statement_state(Sub_statement_state *backup);
void set_n_backup_active_arena(Query_arena *set, Query_arena *backup);
void restore_active_arena(Query_arena *set, Query_arena *backup);
+
+ /*
+ Set the current database (THD::db) from a NULL-terminated string of the given length
+ */
+ void set_db(const char *new_db, uint new_db_len)
+ {
+ if (new_db)
+ {
+ /* Do not reallocate memory if current chunk is big enough. */
+ if (db && db_length >= new_db_len)
+ memcpy(db, new_db, new_db_len+1);
+ else
+ {
+ safeFree(db);
+ db= my_strdup_with_length(new_db, new_db_len, MYF(MY_WME));
+ }
+ db_length= db ? new_db_len: 0;
+ }
+ }
+ void reset_db(char *new_db, uint new_db_len)
+ {
+ db= new_db;
+ db_length= new_db_len;
+ }
+ /*
+ Copy the current database to the argument. Use the current arena to
+ allocate memory for a deep copy: the current database may be freed after
+ a statement is parsed but before it's executed.
+ */
+ bool copy_db_to(char **p_db, uint *p_db_length)
+ {
+ if (db == NULL)
+ {
+ my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+ return TRUE;
+ }
+ *p_db= strmake(db, db_length);
+ if (p_db_length)
+ *p_db_length= db_length;
+ return FALSE;
+ }
};
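
As a reading aid, here is a minimal, self-contained sketch of the ownership rules the new set_db()/reset_db()/copy_db_to() trio encodes. It uses plain malloc and a local dup_str() helper instead of my_strdup_with_length(), safeFree() and the statement arena; the Session type and main() are illustrative only, not server code:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    static char *dup_str(const char *s)          // stand-in for my_strdup_with_length
    {
      char *p= (char*) malloc(strlen(s) + 1);
      if (p)
        strcpy(p, s);
      return p;
    }

    struct Session                               // stand-in for THD
    {
      char *db= nullptr;                         // owned when set via set_db()
      unsigned db_length= 0;

      /* set_db(): keep a private copy, reusing the old buffer if it is big enough */
      void set_db(const char *new_db, unsigned len)
      {
        if (!new_db)
          return;
        if (db && db_length >= len)
          memcpy(db, new_db, len + 1);
        else
        {
          free(db);
          db= dup_str(new_db);
        }
        db_length= db ? len : 0;
      }
      /* reset_db(): alias storage owned by someone else; no copy, no free */
      void reset_db(char *new_db, unsigned len) { db= new_db; db_length= len; }
      /* copy_db_to(): deep copy for callers that may outlive the current db */
      bool copy_db_to(char **p_db, unsigned *p_db_length)
      {
        if (db == nullptr)
          return true;                           // maps to ER_NO_DB_ERROR in the server
        *p_db= dup_str(db);
        if (p_db_length)
          *p_db_length= db_length;
        return false;
      }
    };

    int main()
    {
      Session s;
      s.set_db("test", 4);
      char *copy;
      unsigned copy_len;
      if (!s.copy_db_to(&copy, &copy_len))
      {
        printf("current db: %.*s\n", (int) copy_len, copy);
        free(copy);
      }
      free(s.db);
      return 0;
    }
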
@@ -1916,7 +1957,7 @@ typedef struct st_sort_buffer {
class Table_ident :public Sql_alloc
{
- public:
+public:
LEX_STRING db;
LEX_STRING table;
SELECT_LEX_UNIT *sel;
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index a52972753a7..44947384b32 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -805,8 +805,7 @@ exit:
{
if (!(thd->slave_thread)) /* a slave thread will free it itself */
x_free(thd->db);
- thd->db= 0;
- thd->db_length= 0;
+ thd->reset_db(NULL, 0);
}
VOID(pthread_mutex_unlock(&LOCK_mysql_create_db));
start_waiting_global_read_lock(thd);
@@ -1218,14 +1217,10 @@ end:
{
if (!(thd->slave_thread))
my_free(dbname, MYF(0));
- thd->db= NULL;
- thd->db_length= 0;
+ thd->reset_db(NULL, 0);
}
else
- {
- thd->db= dbname; // THD::~THD will free this
- thd->db_length= db_length;
- }
+ thd->reset_db(dbname, db_length); // THD::~THD will free this
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (!no_access_check)
sctx->db_access= db_access;
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 1cd7778a053..0193d4d5355 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -254,7 +254,8 @@ err:
DESCRIPTION
Though this function takes a list of tables, only the first list entry
- will be closed. Broadcasts a COND_refresh condition.
+ will be closed.
+ Broadcasts refresh if it closed the table.
RETURN
FALSE ok
@@ -291,7 +292,7 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables)
if (close_thread_table(thd, table_ptr))
{
/* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
VOID(pthread_mutex_unlock(&LOCK_open));
}
@@ -608,7 +609,7 @@ err0:
 tables are closed (if MYSQL_HA_FLUSH_ALL is set).
If 'tables' is NULL and MYSQL_HA_FLUSH_ALL is not set,
all HANDLER tables marked for flush are closed.
- Broadcasts a COND_refresh condition, for every table closed.
+ Broadcasts refresh for every table closed.
NOTE
Since mysql_ha_flush() is called when the base table has to be closed,
@@ -704,7 +705,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags,
MYSQL_HA_REOPEN_ON_USAGE mark for reopen.
DESCRIPTION
- Broadcasts a COND_refresh condition, for every table closed.
+ Broadcasts refresh if it closed the table.
The caller must lock LOCK_open.
RETURN
@@ -742,7 +743,7 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags)
if (close_thread_table(thd, table_ptr))
{
/* Tell threads waiting for refresh that something has happened */
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
DBUG_RETURN(0);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 8ffc6f53a43..ba0d2d00f2c 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -298,9 +298,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
{
if (thd->locked_tables)
{
- if (find_locked_table(thd,
- table_list->db ? table_list->db : thd->db,
- table_list->table_name))
+ DBUG_ASSERT(table_list->db); /* Must be set in the parser */
+ if (find_locked_table(thd, table_list->db, table_list->table_name))
{
my_error(ER_DELAYED_INSERT_TABLE_LOCKED, MYF(0),
table_list->table_name);
@@ -1329,8 +1328,8 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
TABLE *table;
DBUG_ENTER("delayed_get_table");
- if (!table_list->db)
- table_list->db=thd->db;
+ /* Must be set in the parser */
+ DBUG_ASSERT(table_list->db);
/* Find the thread which handles this table. */
if (!(tmp=find_handler(thd,table_list)))
@@ -1349,18 +1348,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
*/
if (! (tmp= find_handler(thd, table_list)))
{
- /*
- Avoid that a global read lock steps in while we are creating the
- new thread. It would block trying to open the table. Hence, the
- DI thread and this thread would wait until after the global
- readlock is gone. Since the insert thread needs to wait for a
- global read lock anyway, we do it right now. Note that
- wait_if_global_read_lock() sets a protection against a new
- global read lock when it succeeds. This needs to be released by
- start_waiting_global_read_lock().
- */
- if (wait_if_global_read_lock(thd, 0, 1))
- goto err;
if (!(tmp=new delayed_insert()))
{
my_error(ER_OUTOFMEMORY,MYF(0),sizeof(delayed_insert));
@@ -1369,15 +1356,15 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
pthread_mutex_lock(&LOCK_thread_count);
thread_count++;
pthread_mutex_unlock(&LOCK_thread_count);
- if (!(tmp->thd.db=my_strdup(table_list->db,MYF(MY_WME))) ||
- !(tmp->thd.query=my_strdup(table_list->table_name,MYF(MY_WME))))
+ tmp->thd.set_db(table_list->db, strlen(table_list->db));
+ tmp->thd.query= my_strdup(table_list->table_name,MYF(MY_WME));
+ if (tmp->thd.db == NULL || tmp->thd.query == NULL)
{
delete tmp;
my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0));
goto err1;
}
tmp->table_list= *table_list; // Needed to open table
- tmp->table_list.db= tmp->thd.db;
tmp->table_list.alias= tmp->table_list.table_name= tmp->thd.query;
tmp->lock();
pthread_mutex_lock(&tmp->mutex);
@@ -1401,11 +1388,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
pthread_cond_wait(&tmp->cond_client,&tmp->mutex);
}
pthread_mutex_unlock(&tmp->mutex);
- /*
- Release the protection against the global read lock and wake
- everyone, who might want to set a global read lock.
- */
- start_waiting_global_read_lock(thd);
thd->proc_info="got old table";
if (tmp->thd.killed)
{
@@ -1441,11 +1423,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list)
err1:
thd->fatal_error();
- /*
- Release the protection against the global read lock and wake
- everyone, who might want to set a global read lock.
- */
- start_waiting_global_read_lock(thd);
err:
pthread_mutex_unlock(&LOCK_delayed_create);
DBUG_RETURN(0); // Continue with normal insert
@@ -2676,7 +2653,7 @@ bool select_create::send_eof()
hash_delete(&open_cache,(byte*) table);
/* Tell threads waiting for refresh that something has happened */
if (version != refresh_version)
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
lock=0;
table=0;
@@ -2705,7 +2682,7 @@ void select_create::abort()
quick_rm_table(table_type, create_table->db, create_table->table_name);
/* Tell threads waiting for refresh that something has happened */
if (version != refresh_version)
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
}
else if (!create_info->table_existed)
close_temporary_table(thd, create_table->db, create_table->table_name);
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index c75aa8f31b9..d63c6ef9f20 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -758,6 +758,11 @@ public:
*this= *state;
}
+ /*
+ Direct addition to the list of query tables.
+ If you are using this function, you must ensure that the table
+ object, in particular its table->db member, is initialized.
+ */
void add_to_query_tables(TABLE_LIST *table)
{
*(table->prev_global= query_tables_last)= table;
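
A hedged sketch of a well-formed caller, showing the table->db requirement spelled out in the comment above being satisfied before the call. Allocation goes through the THD arena; db_name/tbl_name and the exact set of fields filled in are assumptions modeled on existing callers, not part of this hunk:

    /* Illustrative only: build a TABLE_LIST with its db member set
       before handing it to st_lex::add_to_query_tables(). */
    TABLE_LIST *table= (TABLE_LIST*) thd->calloc(sizeof(TABLE_LIST));
    if (!table)
      return TRUE;                                   /* out of memory */
    table->db_length= strlen(db_name);
    table->db= thd->strmake(db_name, table->db_length);
    table->table_name_length= strlen(tbl_name);
    table->table_name= thd->strmake(tbl_name, table->table_name_length);
    table->alias= table->table_name;
    table->lock_type= TL_READ;
    lex->add_to_query_tables(table);
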
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index d0bc2f434d5..cd57c280950 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -93,8 +93,6 @@ const char *xa_state_names[]={
"NON-EXISTING", "ACTIVE", "IDLE", "PREPARED"
};
-static char empty_c_string[1]= {0}; // Used for not defined 'db'
-
#ifdef __WIN__
static void test_signal(int sig_ptr)
{
@@ -300,8 +298,7 @@ int check_user(THD *thd, enum enum_server_command command,
thd->db is saved in caller and needs to be freed by caller if this
function returns 0
*/
- thd->db= 0;
- thd->db_length= 0;
+ thd->reset_db(NULL, 0);
if (mysql_change_db(thd, db, FALSE))
{
/* Send the error to the client */
@@ -341,9 +338,8 @@ int check_user(THD *thd, enum enum_server_command command,
if connect failed. Also in case of 'CHANGE USER' failure, current
database will be switched to 'no database selected'.
*/
- thd->db= 0;
- thd->db_length= 0;
-
+ thd->reset_db(NULL, 0);
+
USER_RESOURCES ur;
int res= acl_getroot(thd, &ur, passwd, passwd_len);
#ifndef EMBEDDED_LIBRARY
@@ -1321,19 +1317,6 @@ end:
DBUG_RETURN(0);
}
- /* This works because items are allocated with sql_alloc() */
-
-void free_items(Item *item)
-{
- Item *next;
- DBUG_ENTER("free_items");
- for (; item ; item=next)
- {
- next=item->next;
- item->delete_self();
- }
- DBUG_VOID_RETURN;
-}
/* This works because items are allocated with sql_alloc() */
@@ -1345,7 +1328,26 @@ void cleanup_items(Item *item)
DBUG_VOID_RETURN;
}
-int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd)
+/*
+ Handle COM_TABLE_DUMP command
+
+ SYNOPSIS
+ mysql_table_dump
+ thd thread handle
+ db database name or an empty string. If empty,
+ the current database of the connection is used
+ tbl_name name of the table to dump
+
+ NOTES
+ This function is written to handle one specific command only.
+
+ RETURN VALUE
+ 0 success
+ 1 error, the error message is set in THD
+*/
+
+static
+int mysql_table_dump(THD* thd, char* db, char* tbl_name)
{
TABLE* table;
TABLE_LIST* table_list;
@@ -1382,7 +1384,7 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd)
goto err;
}
net_flush(&thd->net);
- if ((error= table->file->dump(thd,fd)))
+ if ((error= table->file->dump(thd,-1)))
my_error(ER_GET_ERRNO, MYF(0), error);
err:
@@ -1632,7 +1634,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
}
tbl_name= strmake(db, packet + 1, db_len)+1;
strmake(tbl_name, packet + db_len + 2, tbl_len);
- mysql_table_dump(thd, db, tbl_name, -1);
+ mysql_table_dump(thd, db, tbl_name);
break;
}
case COM_CHANGE_USER:
@@ -1806,11 +1808,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS],
&LOCK_status);
bzero((char*) &table_list,sizeof(table_list));
- if (!(table_list.db=thd->db))
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
+ if (thd->copy_db_to(&table_list.db, 0))
break;
- }
pend= strend(packet);
thd->convert_string(&conv_name, system_charset_info,
packet, (uint) (pend-packet), thd->charset());
@@ -2157,6 +2156,34 @@ void log_slow_statement(THD *thd)
}
+/*
+ Create a TABLE_LIST object for an INFORMATION_SCHEMA table.
+
+ SYNOPSIS
+ prepare_schema_table()
+ thd thread handle
+ lex current lex
+ table_ident table alias if it's used
+ schema_table_idx the type of the INFORMATION_SCHEMA table to be
+ created
+
+ DESCRIPTION
+ This function is used in the parser to convert a SHOW or DESCRIBE
+ table_name command to a SELECT from INFORMATION_SCHEMA.
+ It prepares a SELECT_LEX and a TABLE_LIST object to represent the
+ given command as a SELECT parse tree.
+
+ NOTES
+ Due to the way this function works with memory and LEX it cannot
+ be used outside the parser (parse tree transformations outside
+ the parser break PS and SP).
+
+ RETURN VALUE
+ 0 success
+ 1 out of memory, or SHOW commands are not allowed
+ in this version of the server.
+*/
+
int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
enum enum_schema_tables schema_table_idx)
{
@@ -2184,13 +2211,13 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
DBUG_RETURN(1);
#else
{
- char *db= lex->select_lex.db ? lex->select_lex.db : thd->db;
- if (!db)
+ char *db;
+ if (lex->select_lex.db == NULL &&
+ thd->copy_db_to(&lex->select_lex.db, 0))
{
- my_message(ER_NO_DB_ERROR,
- ER(ER_NO_DB_ERROR), MYF(0)); /* purecov: inspected */
- DBUG_RETURN(1); /* purecov: inspected */
+ DBUG_RETURN(1);
}
+ db= lex->select_lex.db;
remove_escape(db); // Fix escaped '_'
if (check_db_name(db))
{
@@ -2207,11 +2234,6 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
db);
DBUG_RETURN(1);
}
- /*
- We need to do a copy to make this prepared statement safe if this
- was thd->db
- */
- lex->select_lex.db= thd->strdup(db);
break;
}
#endif
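
For context, a rough sketch of how this helper is expected to be driven from a SHOW rule in sql_yacc.yy. The enum value, the orig_sql_command assignment, and the surrounding grammar action are assumptions used only to illustrate the DESCRIPTION above; they are not part of this patch:

    /* Hypothetical grammar action: SHOW TABLES rewritten as a SELECT
       from INFORMATION_SCHEMA via prepare_schema_table(). */
    {
      LEX *lex= Lex;
      lex->sql_command= SQLCOM_SELECT;
      lex->orig_sql_command= SQLCOM_SHOW_TABLES;
      if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLE_NAMES))
        YYABORT;
    }
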
@@ -2342,17 +2364,37 @@ static void reset_one_shot_variables(THD *thd)
}
-/****************************************************************************
-** mysql_execute_command
-** Execute command saved in thd and current_lex->sql_command
-****************************************************************************/
+/*
+ Execute command saved in thd and current_lex->sql_command
+
+ SYNOPSIS
+ mysql_execute_command()
+ thd Thread handle
+
+ IMPLEMENTATION
+
+ Before every operation that can request a write lock for a table,
+ wait if a global read lock exists. However, do not wait if this
+ thread has locked tables already: no new locks can be requested
+ until the existing locks are released. The thread that requests the
+ global read lock waits for write-locked tables to become unlocked.
+
+ Note that wait_if_global_read_lock() sets a protection against a new
+ global read lock when it succeeds. This needs to be released by
+ start_waiting_global_read_lock() after the operation.
+
+ RETURN
+ FALSE OK
+ TRUE Error
+*/
bool
mysql_execute_command(THD *thd)
{
- bool res= FALSE;
- int result= 0;
- LEX *lex= thd->lex;
+ bool res= FALSE;
+ bool need_start_waiting= FALSE; // whether we hold protection against a global read lock
+ int result= 0;
+ LEX *lex= thd->lex;
 /* first SELECT_LEX (has special meaning for many non-SELECT commands) */
SELECT_LEX *select_lex= &lex->select_lex;
/* first table of first SELECT_LEX */
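
The IMPLEMENTATION note above is applied case by case further down in this function. Stripped of the surrounding switch, the idiom introduced by this patch for the DML statements reduces to the following paraphrase of the patch's own code (not a new API):

    /* Take the global-read-lock protection only when not under LOCK TABLES,
       remember that we hold it, and release it exactly once at the common
       exit of mysql_execute_command(). */
    if (!thd->locked_tables &&
        !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
    {
      res= 1;                 /* error has already been reported */
      break;                  /* reaches the common cleanup below */
    }
    /* ... run the INSERT/REPLACE/UPDATE/DELETE ... */

    /* at the end of the function: */
    if (need_start_waiting)
      start_waiting_global_read_lock(thd);
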
@@ -2744,8 +2786,8 @@ mysql_execute_command(THD *thd)
case SQLCOM_LOAD_MASTER_TABLE:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (!first_table->db)
- first_table->db= thd->db;
+ DBUG_ASSERT(first_table->db); /* Must be set in the parser */
+
if (check_access(thd, CREATE_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)))
@@ -2837,7 +2879,8 @@ mysql_execute_command(THD *thd)
TABLE in the same way. That way we avoid that a new table is
created during a gobal read lock.
*/
- if (wait_if_global_read_lock(thd, 0, 1))
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
{
res= 1;
goto end_with_restore_list;
@@ -2862,7 +2905,7 @@ mysql_execute_command(THD *thd)
{
update_non_unique_table_error(create_table, "CREATE", duplicate);
res= 1;
- goto end_with_restart_wait;
+ goto end_with_restore_list;
}
}
/* If we create merge table, we have to test tables in merge, too */
@@ -2878,7 +2921,7 @@ mysql_execute_command(THD *thd)
{
update_non_unique_table_error(tab, "CREATE", duplicate);
res= 1;
- goto end_with_restart_wait;
+ goto end_with_restore_list;
}
}
}
@@ -2920,13 +2963,6 @@ mysql_execute_command(THD *thd)
send_ok(thd);
}
-end_with_restart_wait:
- /*
- Release the protection against the global read lock and wake
- everyone, who might want to set a global read lock.
- */
- start_waiting_global_read_lock(thd);
-
 /* put tables back for PS re-execution */
end_with_restore_list:
lex->link_first_table_back(create_table, link_to_local);
@@ -2993,25 +3029,8 @@ end_with_restore_list:
my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name);
goto error;
}
- if (!select_lex->db)
- {
- /*
- In the case of ALTER TABLE ... RENAME we should supply the
- default database if the new name is not explicitly qualified
- by a database. (Bug #11493)
- */
- if (lex->alter_info.flags & ALTER_RENAME)
- {
- if (! thd->db)
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- goto error;
- }
- select_lex->db= thd->db;
- }
- else
- select_lex->db= first_table->db;
- }
+ /* Must be set in the parser */
+ DBUG_ASSERT(select_lex->db);
if (check_access(thd, ALTER_ACL, first_table->db,
&first_table->grant.privilege, 0, 0,
test(first_table->schema_table)) ||
@@ -3044,6 +3063,13 @@ end_with_restore_list:
goto error;
else
{
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
thd->enable_slow_log= opt_log_slow_admin_statements;
res= mysql_alter_table(thd, select_lex->db, lex->name,
&lex->create_info,
@@ -3301,6 +3327,14 @@ end_with_restore_list:
break;
/* Skip first table, which is the table we are inserting in */
select_lex->context.table_list= first_table->next_local;
+
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values,
lex->update_list, lex->value_list,
lex->duplicates, lex->ignore);
@@ -3324,6 +3358,14 @@ end_with_restore_list:
select_lex->options|= SELECT_NO_UNLOCK;
unit->set_limit(select_lex);
+
+ if (! thd->locked_tables &&
+ ! (need_start_waiting= ! wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
if (!(res= open_and_lock_tables(thd, all_tables)))
{
/* Skip first table, which is the table we are inserting in */
@@ -3391,6 +3433,14 @@ end_with_restore_list:
break;
DBUG_ASSERT(select_lex->offset_limit == 0);
unit->set_limit(select_lex);
+
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
res = mysql_delete(thd, all_tables, select_lex->where,
&select_lex->order_list,
unit->select_limit_cnt, select_lex->options,
@@ -3404,6 +3454,13 @@ end_with_restore_list:
(TABLE_LIST *)thd->lex->auxilliary_table_list.first;
multi_delete *result;
+ if (!thd->locked_tables &&
+ !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1)))
+ {
+ res= 1;
+ break;
+ }
+
if ((res= multi_delete_precheck(thd, all_tables)))
break;
@@ -3681,12 +3738,8 @@ end_with_restore_list:
}
case SQLCOM_ALTER_DB:
{
- char *db= lex->name ? lex->name : thd->db;
- if (!db)
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- break;
- }
+ char *db= lex->name;
+ DBUG_ASSERT(db); /* Must be set in the parser */
if (!strip_sp(db) || check_db_name(db))
{
my_error(ER_WRONG_DB_NAME, MYF(0), lex->name);
@@ -4135,23 +4188,11 @@ end_with_restore_list:
case SQLCOM_CREATE_SPFUNCTION:
{
uint namelen;
- char *name, *db;
+ char *name;
int result;
DBUG_ASSERT(lex->sphead != 0);
-
- if (!lex->sphead->m_db.str || !lex->sphead->m_db.str[0])
- {
- if (!thd->db)
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0));
- delete lex->sphead;
- lex->sphead= 0;
- goto error;
- }
- lex->sphead->m_db.length= strlen(thd->db);
- lex->sphead->m_db.str= thd->db;
- }
+ DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */
if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str, 0, 0, 0,
is_schema_db(lex->sphead->m_db.str)))
@@ -4268,41 +4309,27 @@ end_with_restore_list:
}
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
- /*
- We need to copy name and db in order to use them for
- check_routine_access which is called after lex->sphead has
- been deleted.
- */
- name= thd->strdup(name);
- lex->sphead->m_db.str= db= thd->strmake(lex->sphead->m_db.str,
- lex->sphead->m_db.length);
res= (result= lex->sphead->create(thd));
if (result == SP_OK)
{
- /*
- We must cleanup the unit and the lex here because
- sp_grant_privileges calls (indirectly) db_find_routine,
- which in turn may call MYSQLparse with THD::lex.
- TODO: fix db_find_routine to use a temporary lex.
- */
- lex->unit.cleanup();
- delete lex->sphead;
- lex->sphead= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
 /* only add privileges if really necessary */
if (sp_automatic_privileges && !opt_noacl &&
check_routine_access(thd, DEFAULT_CREATE_PROC_ACLS,
- db, name,
+ lex->sphead->m_db.str, name,
lex->sql_command == SQLCOM_CREATE_PROCEDURE, 1))
{
close_thread_tables(thd);
- if (sp_grant_privileges(thd, db, name,
+ if (sp_grant_privileges(thd, lex->sphead->m_db.str, name,
lex->sql_command == SQLCOM_CREATE_PROCEDURE))
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_PROC_AUTO_GRANT_FAIL,
ER(ER_PROC_AUTO_GRANT_FAIL));
}
#endif
+ lex->unit.cleanup();
+ delete lex->sphead;
+ lex->sphead= 0;
send_ok(thd);
}
else
@@ -4717,7 +4744,8 @@ end_with_restore_list:
view_store_options(thd, first_table, &buff);
buff.append(STRING_WITH_LEN("VIEW "));
/* Test if user supplied a db (ie: we did not use thd->db) */
- if (first_table->db != thd->db && first_table->db[0])
+ if (first_table->db && first_table->db[0] &&
+ (thd->db == NULL || strcmp(first_table->db, thd->db)))
{
append_identifier(thd, &buff, first_table->db,
first_table->db_length);
@@ -4970,10 +4998,22 @@ end_with_restore_list:
if (lex->sql_command != SQLCOM_CALL && lex->sql_command != SQLCOM_EXECUTE &&
uc_update_queries[lex->sql_command]<2)
thd->row_count_func= -1;
- DBUG_RETURN(res || thd->net.report_error);
+
+ goto end;
error:
- DBUG_RETURN(1);
+ res= TRUE;
+
+end:
+ if (need_start_waiting)
+ {
+ /*
+ Release the protection against the global read lock and wake
+ everyone, who might want to set a global read lock.
+ */
+ start_waiting_global_read_lock(thd);
+ }
+ DBUG_RETURN(res || thd->net.report_error);
}
@@ -5287,7 +5327,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
(want_access & ~EXTRA_ACL) &&
thd->db)
tables->grant.privilege= want_access;
- else if (tables->db && tables->db == thd->db)
+ else if (tables->db && thd->db && strcmp(tables->db, thd->db) == 0)
{
if (found && !grant_option) // db already checked
tables->grant.privilege=found_access;
@@ -5435,22 +5475,25 @@ bool check_merge_table_access(THD *thd, char *db,
static bool check_db_used(THD *thd,TABLE_LIST *tables)
{
+ char *current_db= NULL;
for (; tables; tables= tables->next_global)
{
- if (!tables->db)
+ if (tables->db == NULL)
{
- if (!(tables->db=thd->db))
- {
- my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR),
- MYF(0)); /* purecov: tested */
- return TRUE; /* purecov: tested */
- }
+ /*
+ This code is never reached and should be removed in 5.1. All
+ tables that are added to the list of tables should already have their
+ database field initialized properly (see st_lex::add_table_to_list).
+ */
+ DBUG_ASSERT(0);
+ if (thd->copy_db_to(&current_db, 0))
+ return TRUE;
+ tables->db= current_db;
}
}
return FALSE;
}
-
/****************************************************************************
Check stack size; Send error if there isn't enough stack to continue
****************************************************************************/
@@ -6070,19 +6113,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->db= table->db.str;
ptr->db_length= table->db.length;
}
- else if (thd->db)
- {
- ptr->db= thd->db;
- ptr->db_length= thd->db_length;
- }
- else
- {
- /* The following can't be "" as we may do 'casedn_str()' on it */
- ptr->db= empty_c_string;
- ptr->db_length= 0;
- }
- if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute())
- ptr->db= thd->strdup(ptr->db);
+ else if (thd->copy_db_to(&ptr->db, &ptr->db_length))
+ DBUG_RETURN(0);
ptr->alias= alias_str;
if (lower_case_table_names && table->table.length)
@@ -7273,6 +7305,8 @@ bool insert_precheck(THD *thd, TABLE_LIST *tables)
my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0));
DBUG_RETURN(TRUE);
}
+ if (check_db_used(thd, tables))
+ DBUG_RETURN(TRUE);
DBUG_RETURN(FALSE);
}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 275cfbaa088..91c71193df2 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1656,8 +1656,23 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
my_casedn_str(files_charset_info, path);
create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE;
}
- else
+ else
+ {
+ #ifdef FN_DEVCHAR
+ /* check if the table name contains FN_DEVCHAR when defined */
+ const char *start= alias;
+ while (*start != '\0')
+ {
+ if (*start == FN_DEVCHAR)
+ {
+ my_error(ER_WRONG_TABLE_NAME, MYF(0), alias);
+ DBUG_RETURN(TRUE);
+ }
+ start++;
+ }
+ #endif
build_table_path(path, sizeof(path), db, alias, reg_ext);
+ }
/* Check if table already exists */
if ((create_info->options & HA_LEX_CREATE_TMP_TABLE)
@@ -1674,8 +1689,6 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
DBUG_RETURN(TRUE);
}
- if (wait_if_global_read_lock(thd, 0, 1))
- DBUG_RETURN(TRUE);
VOID(pthread_mutex_lock(&LOCK_open));
if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
{
@@ -1743,7 +1756,6 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
end:
VOID(pthread_mutex_unlock(&LOCK_open));
- start_waiting_global_read_lock(thd);
thd->proc_info="After create";
DBUG_RETURN(error);
@@ -1923,7 +1935,7 @@ void close_cached_table(THD *thd, TABLE *table)
thd->open_tables=unlink_open_table(thd,thd->open_tables,table);
/* When lock on LOCK_open is freed other threads can continue */
- pthread_cond_broadcast(&COND_refresh);
+ broadcast_refresh();
DBUG_VOID_RETURN;
}
@@ -2672,7 +2684,8 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
TABLE_LIST src_tables_list;
DBUG_ENTER("mysql_create_like_table");
- src_db= table_ident->db.str ? table_ident->db.str : thd->db;
+ DBUG_ASSERT(table_ident->db.str); /* Must be set in the parser */
+ src_db= table_ident->db.str;
/*
Validate the source table
@@ -3894,7 +3907,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
if (error)
{
VOID(pthread_mutex_unlock(&LOCK_open));
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
goto err;
}
thd->proc_info="end";
@@ -3904,7 +3917,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
mysql_bin_log.write(&qinfo);
}
- VOID(pthread_cond_broadcast(&COND_refresh));
+ broadcast_refresh();
VOID(pthread_mutex_unlock(&LOCK_open));
#ifdef HAVE_BERKELEY_DB
if (old_db_type == DB_TYPE_BERKELEY_DB)
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index f943b014118..db1d1a10b11 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -932,8 +932,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
save_db.str= thd->db;
save_db.length= thd->db_length;
- thd->db_length= strlen(db);
- thd->db= (char *) db;
+ thd->reset_db((char*) db, strlen(db));
while ((trg_create_str= it++))
{
trg_sql_mode= itm++;
@@ -1035,8 +1034,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
lex_end(&lex);
}
- thd->db= save_db.str;
- thd->db_length= save_db.length;
+ thd->reset_db(save_db.str, save_db.length);
thd->lex= old_lex;
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
@@ -1049,8 +1047,7 @@ err_with_lex_cleanup:
thd->lex= old_lex;
thd->spcont= save_spcont;
thd->variables.sql_mode= save_sql_mode;
- thd->db= save_db.str;
- thd->db_length= save_db.length;
+ thd->reset_db(save_db.str, save_db.length);
DBUG_RETURN(1);
}
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 6269c0a2eb3..95589a58b37 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -140,6 +140,7 @@ void udf_init()
READ_RECORD read_record_info;
TABLE *table;
int error;
+ char db[]= "mysql"; /* Subject to casedn_str(), so it cannot be a constant */
DBUG_ENTER("ufd_init");
if (initialized)
@@ -161,13 +162,12 @@ void udf_init()
initialized = 1;
new_thd->thread_stack= (char*) &new_thd;
new_thd->store_globals();
- new_thd->db= my_strdup("mysql", MYF(0));
- new_thd->db_length=5;
+ new_thd->set_db(db, sizeof(db)-1);
bzero((gptr) &tables,sizeof(tables));
tables.alias= tables.table_name= (char*) "func";
tables.lock_type = TL_READ;
- tables.db=new_thd->db;
+ tables.db= db;
if (simple_open_n_lock_tables(new_thd, &tables))
{
diff --git a/sql/sql_udf.h b/sql/sql_udf.h
index d588572a762..d0729deecaa 100644
--- a/sql/sql_udf.h
+++ b/sql/sql_udf.h
@@ -70,6 +70,7 @@ class udf_handler :public Sql_alloc
void cleanup();
double val(my_bool *null_value)
{
+ is_null= 0;
if (get_arguments())
{
*null_value=1;
@@ -88,6 +89,7 @@ class udf_handler :public Sql_alloc
}
longlong val_int(my_bool *null_value)
{
+ is_null= 0;
if (get_arguments())
{
*null_value=1;
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 0f836bd58ff..1561ade78af 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -452,15 +452,15 @@ bool mysql_create_view(THD *thd,
*/
for (sl= select_lex; sl; sl= sl->next_select())
{
- char *db= view->db ? view->db : thd->db;
+ DBUG_ASSERT(view->db); /* Must be set in the parser */
List_iterator_fast<Item> it(sl->item_list);
Item *item;
- fill_effective_table_privileges(thd, &view->grant, db,
+ fill_effective_table_privileges(thd, &view->grant, view->db,
view->table_name);
while ((item= it++))
{
Item_field *fld;
- uint priv= (get_column_grant(thd, &view->grant, db,
+ uint priv= (get_column_grant(thd, &view->grant, view->db,
view->table_name, item->name) &
VIEW_ANY_ACL);
if ((fld= item->filed_for_view_update()))
@@ -641,8 +641,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
if (!parser->ok() || !is_equal(&view_type, parser->type()))
{
- my_error(ER_WRONG_OBJECT, MYF(0),
- (view->db ? view->db : thd->db), view->table_name, "VIEW");
+ my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->table_name, "VIEW");
DBUG_RETURN(-1);
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 4f3cf4d8554..e45be1ef148 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1237,12 +1237,18 @@ sp_name:
}
| ident
{
+ THD *thd= YYTHD;
+ LEX_STRING db;
if (check_routine_name($1))
{
my_error(ER_SP_WRONG_NAME, MYF(0), $1.str);
YYABORT;
}
- $$= sp_name_current_db_new(YYTHD, $1);
+ if (thd->copy_db_to(&db.str, &db.length))
+ YYABORT;
+ $$= new sp_name(db, $1);
+ if ($$)
+ $$->init_qname(YYTHD);
}
;
@@ -2405,14 +2411,26 @@ create2:
| LIKE table_ident
{
LEX *lex=Lex;
+ THD *thd= lex->thd;
if (!(lex->name= (char *)$2))
YYABORT;
+ if ($2->db.str == NULL &&
+ thd->copy_db_to(&($2->db.str), &($2->db.length)))
+ {
+ YYABORT;
+ }
}
| '(' LIKE table_ident ')'
{
LEX *lex=Lex;
+ THD *thd= lex->thd;
if (!(lex->name= (char *)$3))
YYABORT;
+ if ($3->db.str == NULL &&
+ thd->copy_db_to(&($3->db.str), &($3->db.length)))
+ {
+ YYABORT;
+ }
}
;
@@ -3240,7 +3258,9 @@ alter:
lex->key_list.empty();
lex->col_list.empty();
lex->select_lex.init_order();
- lex->select_lex.db=lex->name=0;
+ lex->select_lex.db=
+ ((TABLE_LIST*) lex->select_lex.table_list.first)->db;
+ lex->name=0;
bzero((char*) &lex->create_info,sizeof(lex->create_info));
lex->create_info.db_type= DB_TYPE_DEFAULT;
lex->create_info.default_table_charset= NULL;
@@ -3258,8 +3278,11 @@ alter:
opt_create_database_options
{
LEX *lex=Lex;
+ THD *thd= Lex->thd;
lex->sql_command=SQLCOM_ALTER_DB;
lex->name= $3;
+ if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL))
+ YYABORT;
}
| ALTER PROCEDURE sp_name
{
@@ -3421,14 +3444,20 @@ alter_list_item:
| RENAME opt_to table_ident
{
LEX *lex=Lex;
+ THD *thd= lex->thd;
lex->select_lex.db=$3->db.str;
- lex->name= $3->table.str;
+ if (lex->select_lex.db == NULL &&
+ thd->copy_db_to(&lex->select_lex.db, NULL))
+ {
+ YYABORT;
+ }
if (check_table_name($3->table.str,$3->table.length) ||
$3->db.str && check_db_name($3->db.str))
{
my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str);
YYABORT;
}
+ lex->name= $3->table.str;
lex->alter_info.flags|= ALTER_RENAME;
}
| CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate
@@ -4742,7 +4771,13 @@ simple_expr:
#endif /* HAVE_DLOPEN */
{
LEX *lex= Lex;
- sp_name *name= sp_name_current_db_new(YYTHD, $1);
+ THD *thd= lex->thd;
+ LEX_STRING db;
+ if (thd->copy_db_to(&db.str, &db.length))
+ YYABORT;
+ sp_name *name= new sp_name(db, $1);
+ if (name)
+ name->init_qname(thd);
sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION);
if ($4)
@@ -8460,7 +8495,9 @@ grant_ident:
'*'
{
LEX *lex= Lex;
- lex->current_select->db= lex->thd->db;
+ THD *thd= lex->thd;
+ if (thd->copy_db_to(&lex->current_select->db, NULL))
+ YYABORT;
if (lex->grant == GLOBAL_ACLS)
lex->grant = DB_ACLS & ~GRANT_ACL;
else if (lex->columns.elements)
diff --git a/sql/table.cc b/sql/table.cc
index f3c9b8f8a8a..9ec9463c33c 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1593,10 +1593,6 @@ bool check_db_name(char *name)
if (*name == '/' || *name == '\\' || *name == FN_LIBCHAR ||
*name == FN_EXTCHAR)
return 1;
-#ifdef FN_DEVCHAR
- if (*name == FN_DEVCHAR)
- return 1;
-#endif
name++;
}
return last_char_is_space || (uint) (name - start) > NAME_LEN;
@@ -1639,10 +1635,6 @@ bool check_table_name(const char *name, uint length)
#endif
if (*name == '/' || *name == '\\' || *name == FN_EXTCHAR)
return 1;
-#ifdef FN_DEVCHAR
- if (*name == FN_DEVCHAR)
- return 1;
-#endif
name++;
}
#if defined(USE_MB) && defined(USE_MB_IDENT)
diff --git a/sql/table.h b/sql/table.h
index 106421d7a17..ebb4481ef3a 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -599,7 +599,8 @@ typedef struct st_table_list
thr_lock_type lock_type;
uint outer_join; /* Which join type */
uint shared; /* Used in multi-upd */
- uint32 db_length, table_name_length;
+ uint db_length;
+ uint32 table_name_length;
bool updatable; /* VIEW/TABLE can be updated now */
bool straight; /* optimize with prev table */
bool updating; /* for replicate-do/ignore table */
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 079abfc9299..d12aef47b40 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1548,6 +1548,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
TABLE *table;
Tz_names_entry *tmp_tzname;
my_bool return_val= 1;
+ char db[]= "mysql";
int res;
DBUG_ENTER("my_tz_init");
@@ -1604,13 +1605,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
leap seconds shared by all time zones.
*/
- thd->db= my_strdup("mysql",MYF(0));
- thd->db_length= 5; // Safety
+ thd->set_db(db, sizeof(db)-1);
bzero((char*) &tables_buff, sizeof(TABLE_LIST));
tables_buff[0].alias= tables_buff[0].table_name=
(char*)"time_zone_leap_second";
tables_buff[0].lock_type= TL_READ;
- tables_buff[0].db= thd->db;
+ tables_buff[0].db= db;
/*
Fill TABLE_LIST for the rest of the time zone describing tables
and link it to first one.
diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh
index 92d0708200b..fd92c2bd25e 100644
--- a/support-files/mysql.spec.sh
+++ b/support-files/mysql.spec.sh
@@ -485,17 +485,7 @@ chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
# Initiate databases if needed
%{_bindir}/mysql_install_db --rpm --user=%{mysqld_user}
-# Upgrade databases if needed
-# This must be done as database user "root", who should be password-protected,
-# but this password is not available here.
-# So ensure the server is isolated as much as possible, and start it so that
-# passwords are not checked.
-# See the related change in the start script "/etc/init.d/mysql".
-chmod 700 $mysql_datadir
-%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables
-%{_bindir}/mysql_upgrade
-%{_sysconfdir}/init.d/mysql stop --skip-networking --skip-grant-tables
-chmod 755 $mysql_datadir
+# Upgrading the databases (if needed) would go here - but it cannot be automated yet
# Change permissions again to fix any new files.
chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
@@ -598,6 +588,7 @@ fi
%attr(755, root, root) %{_bindir}/mysqlbug
%attr(755, root, root) %{_bindir}/mysqld_multi
%attr(755, root, root) %{_bindir}/mysqld_safe
+%attr(755, root, root) %{_bindir}/mysqldumpslow
%attr(755, root, root) %{_bindir}/mysqlhotcopy
%attr(755, root, root) %{_bindir}/mysqltest
%attr(755, root, root) %{_bindir}/perror
@@ -628,7 +619,6 @@ fi
%attr(755, root, root) %{_bindir}/mysqlbinlog
%attr(755, root, root) %{_bindir}/mysqlcheck
%attr(755, root, root) %{_bindir}/mysqldump
-%attr(755, root, root) %{_bindir}/mysqldumpslow
%attr(755, root, root) %{_bindir}/mysqlimport
%attr(755, root, root) %{_bindir}/mysqlshow
@@ -733,6 +723,20 @@ fi
# itself - note that they must be ordered by date (important when
# merging BK trees)
%changelog
+* Tue Jun 27 2006 Joerg Bruehe <joerg@mysql.com>
+
+- move "mysqldumpslow" from the client RPM to the server RPM (bug#20216)
+
+- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade;
+ there are some more aspects which need to be solved before this is possible.
+ For now, just ensure the binary "mysql_upgrade" is delivered and installed.
+
+* Thu Jun 22 2006 Joerg Bruehe <joerg@mysql.com>
+
+- Close a gap in the previous version by explicitly using
+ a newly created temporary directory for the socket to be used
+ in the "mysql_upgrade" operation, overriding any local setting.
+
* Tue Jun 20 2006 Joerg Bruehe <joerg@mysql.com>
- To run "mysql_upgrade", we need a running server;