-rw-r--r--   client/mysqlslap.c                          |    9
-rw-r--r--   cmakelists.txt                              |   42
-rw-r--r--   mysql-test/lib/mtr_stress.pl                |    1
-rwxr-xr-x   mysql-test/mysql-test-run.pl                |   33
-rw-r--r--   mysql-test/r/cast.result                    |    8
-rw-r--r--   mysql-test/r/csv.result                     |   93
-rw-r--r--   mysql-test/r/events.result                  |   42
-rw-r--r--   mysql-test/r/events_bugs.result             |   65
-rw-r--r--   mysql-test/r/ndb_condition_pushdown.result  |   18
-rw-r--r--   mysql-test/r/ndb_insert.result              |   16
-rw-r--r--   mysql-test/r/partition.result               |  101
-rw-r--r--   mysql-test/r/rpl_ndb_sp007.result           |   50
-rw-r--r--   mysql-test/t/cast.test                      |    9
-rw-r--r--   mysql-test/t/csv.test                       |   93
-rw-r--r--   mysql-test/t/disabled.def                   |    9
-rw-r--r--   mysql-test/t/events.test                    |   33
-rw-r--r--   mysql-test/t/events_bugs.test               |   27
-rw-r--r--   mysql-test/t/ndb_condition_pushdown.test    |   12
-rw-r--r--   mysql-test/t/ndb_insert.test                |    8
-rw-r--r--   mysql-test/t/partition.test                 |   75
-rw-r--r--   mysql-test/t/partition_mgm_err2.test        |    1
-rw-r--r--   mysql-test/t/rpl_ndb_sp007.test             |    9
-rw-r--r--   sql/cmakelists.txt                          |   83
-rw-r--r--   sql/field.cc                                |    5
-rw-r--r--   sql/ha_heap.cc                              |    1
-rw-r--r--   sql/ha_ndbcluster.cc                        |   20
-rw-r--r--   sql/handler.cc                              |    2
-rw-r--r--   sql/handlerton-win.cc                       |   72
-rw-r--r--   sql/item_timefunc.cc                        |    1
-rw-r--r--   sql/key.cc                                  |    1
-rw-r--r--   sql/log.cc                                  |    2
-rw-r--r--   sql/mysqld.cc                               |    1
-rw-r--r--   sql/opt_range.cc                            |    1
-rw-r--r--   sql/sp.cc                                   |    1
-rw-r--r--   sql/sp_cache.cc                             |    1
-rw-r--r--   sql/sql_base.cc                             |    4
-rw-r--r--   sql/sql_partition.cc                        |   23
-rw-r--r--   sql/sql_partition.h                         |    2
-rw-r--r--   storage/csv/cmakelists.txt                  |    5
-rw-r--r--   storage/csv/ha_tina.cc                      |  382
-rw-r--r--   storage/csv/ha_tina.h                       |   18
-rw-r--r--   storage/ndb/test/ndbapi/test_event.cpp      |    2
-rw-r--r--   win/Makefile.am                             |    2
43 files changed, 1110 insertions, 273 deletions
diff --git a/client/mysqlslap.c b/client/mysqlslap.c
index e30b8537b79..904bf432071 100644
--- a/client/mysqlslap.c
+++ b/client/mysqlslap.c
@@ -266,6 +266,7 @@ int main(int argc, char **argv)
my_end(0);
exit(1);
}
+
/* globals? Yes, so we only have to run strlen once */
delimiter_length= strlen(delimiter);
@@ -455,9 +456,10 @@ static struct my_option my_long_options[] =
(gptr*) &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0,
0},
{"preserve-schema", OPT_MYSQL_PRESERVE_SCHEMA,
- "Preserve the schema from the mysqlslap run.",
+ "Preserve the schema from the mysqlslap run, this happens unless \
+ --auto-generate-sql or --create are used.",
(gptr*) &opt_preserve, (gptr*) &opt_preserve, 0, GET_BOOL,
- NO_ARG, 0, 0, 0, 0, 0, 0},
+ NO_ARG, TRUE, 0, 0, 0, 0, 0},
{"protocol", OPT_MYSQL_PROTOCOL,
"The protocol of connection (tcp,socket,pipe,memory).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -750,6 +752,9 @@ get_options(int *argc,char ***argv)
if (!user)
user= (char *)"root";
+ if (create_string || auto_generate_sql )
+ opt_preserve= FALSE;
+
if (auto_generate_sql && (create_string || user_supplied_query))
{
fprintf(stderr,
diff --git a/cmakelists.txt b/cmakelists.txt
index cbb2a10c356..a5bdb1a7894 100644
--- a/cmakelists.txt
+++ b/cmakelists.txt
@@ -21,28 +21,38 @@ CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/support-files/my-small.cnf.sh
SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D DBUG_OFF")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -D DBUG_OFF")
-#IF(CMAKE_GENERATOR MATCHES "Visual Studio 8")
-# SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /wd4996")
-# SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /wd4996")
-# SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /wd4996")
-# SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /wd4996")
-#ENDIF(CMAKE_GENERATOR MATCHES "Visual Studio 8")
+IF(CMAKE_GENERATOR MATCHES "Visual Studio 8")
+ SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /wd4996")
+ SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /wd4996")
+ SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /wd4996")
+ SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /wd4996")
+ENDIF(CMAKE_GENERATOR MATCHES "Visual Studio 8")
-IF(CMAKE_GENERATOR MATCHES "Visual Studio 7" OR CMAKE_GENERATOR MATCHES "Visual Studio 8")
+IF(CMAKE_GENERATOR MATCHES "Visual Studio 7" OR
+ CMAKE_GENERATOR MATCHES "Visual Studio 8")
# replace /MDd with /MTd
- STRING(REPLACE "/MDd" "/MTd" CMAKE_CXX_FLAGS_DEBUG_INIT ${CMAKE_CXX_FLAGS_DEBUG_INIT})
- STRING(REPLACE "/MDd" "/MTd" CMAKE_C_FLAGS_DEBUG_INIT ${CMAKE_C_FLAGS_DEBUG_INIT})
- STRING(REPLACE "/MD" "/MT" CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE})
- STRING(REPLACE "/MDd" "/MTd" CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})
- STRING(REPLACE "/MD" "/MT" CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})
- STRING(REPLACE "/MDd" "/MTd" CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})
+ STRING(REPLACE "/MDd" "/MTd" CMAKE_CXX_FLAGS_DEBUG_INIT
+ ${CMAKE_CXX_FLAGS_DEBUG_INIT})
+ STRING(REPLACE "/MDd" "/MTd" CMAKE_C_FLAGS_DEBUG_INIT
+ ${CMAKE_C_FLAGS_DEBUG_INIT})
+ STRING(REPLACE "/MD" "/MT" CMAKE_C_FLAGS_RELEASE
+ ${CMAKE_C_FLAGS_RELEASE})
+ STRING(REPLACE "/MDd" "/MTd" CMAKE_C_FLAGS_DEBUG
+ ${CMAKE_C_FLAGS_DEBUG})
+ STRING(REPLACE "/MD" "/MT" CMAKE_CXX_FLAGS_RELEASE
+ ${CMAKE_CXX_FLAGS_RELEASE})
+ STRING(REPLACE "/MDd" "/MTd" CMAKE_CXX_FLAGS_DEBUG
+ ${CMAKE_CXX_FLAGS_DEBUG})
# remove support for Exception handling
STRING(REPLACE "/GX" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
STRING(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
- STRING(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS_INIT ${CMAKE_CXX_FLAGS_INIT})
- STRING(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS_DEBUG_INIT ${CMAKE_CXX_FLAGS_DEBUG_INIT})
-ENDIF(CMAKE_GENERATOR MATCHES "Visual Studio 7" OR CMAKE_GENERATOR MATCHES "Visual Studio 8")
+ STRING(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS_INIT
+ ${CMAKE_CXX_FLAGS_INIT})
+ STRING(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS_DEBUG_INIT
+ ${CMAKE_CXX_FLAGS_DEBUG_INIT})
+ENDIF(CMAKE_GENERATOR MATCHES "Visual Studio 7" OR
+ CMAKE_GENERATOR MATCHES "Visual Studio 8")
ADD_DEFINITIONS("-D_WINDOWS -D__WIN__ -D _CRT_SECURE_NO_DEPRECATE")
diff --git a/mysql-test/lib/mtr_stress.pl b/mysql-test/lib/mtr_stress.pl
index 77c3d8bb030..92bb220461b 100644
--- a/mysql-test/lib/mtr_stress.pl
+++ b/mysql-test/lib/mtr_stress.pl
@@ -21,7 +21,6 @@ sub run_stress_test ()
{
my $args;
- my $stress_basedir;
my $stress_suitedir;
mtr_report("Starting stress testing\n");
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 6b87d5ddd6c..5839717bffc 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -166,7 +166,6 @@ our $path_my_basedir;
our $opt_vardir; # A path but set directly on cmd line
our $opt_vardir_trace; # unix formatted opt_vardir for trace files
our $opt_tmpdir; # A path but set directly on cmd line
-our $opt_restart_cleanup; # Source a file with SQL drop statements
our $opt_usage;
our $opt_suite;
@@ -588,16 +587,18 @@ sub command_line_setup () {
'with-ndbcluster-only' => \$opt_with_ndbcluster_only,
'ndb-extra-test' => \$opt_ndb_extra_test,
'do-test=s' => \$opt_do_test,
+ 'start-from=s' => \$opt_start_from,
'suite=s' => \$opt_suite,
'skip-rpl' => \$opt_skip_rpl,
'skip-im' => \$opt_skip_im,
'skip-test=s' => \$opt_skip_test,
+ 'big-test' => \$opt_big_test,
# Specify ports
'master_port=i' => \$opt_master_myport,
'slave_port=i' => \$opt_slave_myport,
- 'ndbcluster_port=i' => \$opt_ndbcluster_port,
- 'ndbcluster_port_slave=i' => \$opt_ndbcluster_port_slave,
+ 'ndbcluster-port=i' => \$opt_ndbcluster_port,
+ 'ndbcluster-port-slave=i' => \$opt_ndbcluster_port_slave,
'manager-port=i' => \$opt_manager_port, # Currently not used
'im-port=i' => \$im_port, # Instance Manager port.
'im-mysqld1-port=i' => \$im_mysqld1_port, # Port of mysqld, controlled by IM
@@ -607,13 +608,13 @@ sub command_line_setup () {
'record' => \$opt_record,
'check-testcases' => \$opt_check_testcases,
- # ???
+ # Extra options used when starting mysqld
'mysqld=s' => \@opt_extra_mysqld_opt,
# Run test on running server
'extern' => \$opt_extern,
- 'ndbconnectstring=s' => \$opt_ndbconnectstring,
- 'ndbconnectstring-slave=s' => \$opt_ndbconnectstring_slave,
+ 'ndb-connectstring=s' => \$opt_ndbconnectstring,
+ 'ndb-connectstring-slave=s' => \$opt_ndbconnectstring_slave,
# Debugging
'gdb' => \$opt_gdb,
@@ -652,7 +653,6 @@ sub command_line_setup () {
'vardir=s' => \$opt_vardir,
# Misc
- 'big-test' => \$opt_big_test,
'comment=s' => \$opt_comment,
'debug' => \$opt_debug,
'fast' => \$opt_fast,
@@ -661,13 +661,11 @@ sub command_line_setup () {
'netware' => \$opt_netware,
'old-master' => \$opt_old_master,
'reorder' => \$opt_reorder,
- 'restart-cleanup' => \$opt_restart_cleanup,
'script-debug' => \$opt_script_debug,
'sleep=i' => \$opt_sleep,
'socket=s' => \$opt_socket,
'start-dirty' => \$opt_start_dirty,
'start-and-exit' => \$opt_start_and_exit,
- 'start-from=s' => \$opt_start_from,
'timer' => \$opt_timer,
'unified-diff|udiff' => \$opt_udiff,
'user-test=s' => \$opt_user_test,
@@ -3260,12 +3258,6 @@ sub run_mysqltest ($) {
mtr_add_arg($args, "--sleep=%d", $opt_sleep);
}
- if ( $opt_restart_cleanup and $glob_mysqld_restart )
- {
- mtr_add_arg($args, "--include=%s", "include/drop-on-restart.inc");
- $glob_mysqld_restart= 0;
- }
-
if ( $opt_debug )
{
mtr_add_arg($args, "--debug=d:t:A,%s/log/mysqltest.trace", $opt_vardir_trace);
@@ -3561,12 +3553,15 @@ Options to control what test suites or cases to run
skip-rpl Skip the replication test cases.
skip-im Don't start IM, and skip the IM test cases
skip-test=PREFIX Skip test cases which name are prefixed with PREFIX
+ big-test Pass "--big-test" to mysqltest which will set the environment
+ variable BIG_TEST, which can be checked from test cases.
Options that specify ports
master_port=PORT Specify the port number used by the first master
slave_port=PORT Specify the port number used by the first slave
- ndbcluster_port=PORT Specify the port number used by cluster
+ ndbcluster-port=PORT Specify the port number used by cluster
+ ndbcluster-port-slave=PORT Specify the port number used by slave cluster
Options for test case authoring
@@ -3620,16 +3615,14 @@ Misc options
help Get this help text
unified-diff | udiff When presenting differences, use unified diff
- testcase-timeout=MINUTES Max test case run time (default 5)
- suite-timeout=MINUTES Max test suite run time (default 120)
+ testcase-timeout=MINUTES Max test case run time (default $default_testcase_timeout)
+ suite-timeout=MINUTES Max test suite run time (default $default_suite_timeout)
Deprecated options
with-openssl Deprecated option for ssl
Options not yet described, or that I want to look into more
-
- big-test
debug
local
local-master
diff --git a/mysql-test/r/cast.result b/mysql-test/r/cast.result
index 6d184da10e6..2f66b982d5a 100644
--- a/mysql-test/r/cast.result
+++ b/mysql-test/r/cast.result
@@ -337,6 +337,14 @@ Warning 1105 Cast to signed converted positive out-of-range integer to it's nega
select cast(1.0e+300 as signed int);
cast(1.0e+300 as signed int)
9223372036854775807
+CREATE TABLE t1 (f1 double);
+INSERT INTO t1 SET f1 = -1.0e+30 ;
+INSERT INTO t1 SET f1 = +1.0e+30 ;
+SELECT f1 AS double_val, CAST(f1 AS SIGNED INT) AS cast_val FROM t1;
+double_val cast_val
+-1e+30 -9223372036854775808
+1e+30 9223372036854775807
+DROP TABLE t1;
select cast('1.2' as decimal(3,2));
cast('1.2' as decimal(3,2))
1.20
diff --git a/mysql-test/r/csv.result b/mysql-test/r/csv.result
index 9e63b82c29d..70eaac2eb4e 100644
--- a/mysql-test/r/csv.result
+++ b/mysql-test/r/csv.result
@@ -4993,6 +4993,99 @@ val
2
UNLOCK TABLES;
DROP TABLE test_concurrent_insert;
+CREATE TABLE test_repair_table ( val integer ) ENGINE = CSV;
+CHECK TABLE test_repair_table;
+Table Op Msg_type Msg_text
+test.test_repair_table check status OK
+REPAIR TABLE test_repair_table;
+Table Op Msg_type Msg_text
+test.test_repair_table repair status OK
+DROP TABLE test_repair_table;
+CREATE TABLE test_repair_table2 ( val integer ) ENGINE = CSV;
+SELECT * from test_repair_table2;
+val
+Warnings:
+Error 1194 Table 'test_repair_table2' is marked as crashed and should be repaired
+SELECT * from test_repair_table2;
+val
+test_repair_table2.CSM
+CHECK TABLE test_repair_table2;
+Table Op Msg_type Msg_text
+test.test_repair_table2 check status OK
+DROP TABLE test_repair_table2;
+CREATE TABLE test_repair_table3 ( val integer ) ENGINE = CSV;
+CHECK TABLE test_repair_table3;
+Table Op Msg_type Msg_text
+test.test_repair_table3 check error Corrupt
+REPAIR TABLE test_repair_table3;
+Table Op Msg_type Msg_text
+test.test_repair_table3 repair status OK
+SELECT * FROM test_repair_table3;
+val
+1
+4
+DROP TABLE test_repair_table3;
+CREATE TABLE test_repair_table4 (
+num int not null,
+magic_no int(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+company_name char(30) DEFAULT '' NOT NULL,
+founded char(4) DEFAULT '' NOT NULL
+) ENGINE = CSV;
+SELECT * FROM test_repair_table4;
+num magic_no company_name founded
+Warnings:
+Error 1194 Table 'test_repair_table4' is marked as crashed and should be repaired
+SELECT * FROM test_repair_table4;
+num magic_no company_name founded
+CHECK TABLE test_repair_table4;
+Table Op Msg_type Msg_text
+test.test_repair_table4 check status OK
+INSERT INTO test_repair_table4 VALUES (2,101,'SAP','1972');
+INSERT INTO test_repair_table4 VALUES (1,101,'Microsoft','1978');
+INSERT INTO test_repair_table4 VALUES (2,101,'MySQL','1995');
+SELECT * FROM test_repair_table4;
+num magic_no company_name founded
+2 0101 SAP 1972
+1 0101 Microsoft 1978
+2 0101 MySQL 1995
+CHECK TABLE test_repair_table4;
+Table Op Msg_type Msg_text
+test.test_repair_table4 check status OK
+REPAIR TABLE test_repair_table4;
+Table Op Msg_type Msg_text
+test.test_repair_table4 repair status OK
+SELECT * FROM test_repair_table4;
+num magic_no company_name founded
+2 0101 SAP 1972
+1 0101 Microsoft 1978
+2 0101 MySQL 1995
+CHECK TABLE test_repair_table4;
+Table Op Msg_type Msg_text
+test.test_repair_table4 check status OK
+REPAIR TABLE test_repair_table4;
+Table Op Msg_type Msg_text
+test.test_repair_table4 repair status OK
+SELECT * FROM test_repair_table4;
+num magic_no company_name founded
+2 0101 SAP 1972
+1 0101 Microsoft 1978
+2 0101 MySQL 1995
+DROP TABLE test_repair_table4;
+CREATE TABLE test_repair_table5 (
+num int not null,
+magic_no int(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+company_name char(30) DEFAULT '' NOT NULL,
+founded char(4) DEFAULT '' NOT NULL
+) ENGINE = CSV;
+CHECK TABLE test_repair_table5;
+Table Op Msg_type Msg_text
+test.test_repair_table5 check error Corrupt
+REPAIR TABLE test_repair_table5;
+Table Op Msg_type Msg_text
+test.test_repair_table5 repair status OK
+SELECT * FROM test_repair_table5;
+num magic_no company_name founded
+DROP TABLE test_repair_table5;
create table t1 (a int) engine=csv;
insert t1 values (1);
delete from t1;
diff --git a/mysql-test/r/events.result b/mysql-test/r/events.result
index 6132e4def77..06c846da781 100644
--- a/mysql-test/r/events.result
+++ b/mysql-test/r/events.result
@@ -260,10 +260,8 @@ ALTER TABLE mysql.event MODIFY db char(64) character set utf8 collate utf8_bin d
"This should work"
SHOW EVENTS;
Db Name Definer Type Execute at Interval value Interval field Starts Ends Status
-events_test intact_check root@localhost RECURRING NULL 10 HOUR # # ENABLED
+events_test intact_check root@localhost RECURRING NULL 10 HOUR # # ENABLED
ALTER TABLE mysql.event MODIFY db char(64) character set cp1251 default '';
-Warnings:
-Warning 1265 Data truncated for column 'db' at row 1
SELECT event_name FROM INFORMATION_SCHEMA.EVENTS;
ERROR HY000: Cannot load from mysql.event. Table probably corrupted. See error log.
ALTER TABLE mysql.event MODIFY db varchar(64) character set utf8 collate utf8_bin default '';
@@ -389,10 +387,9 @@ create event закачка on schedule every 10 hour do select get_lock("test_l
select definer, name, db from mysql.event;
definer name db
root@localhost закачка events_test
-"Should be only 1 process"
-show processlist;
-Id User Host db Command Time State Info
-# root localhost events_test Query # NULL show processlist
+"Should be 0 processes"
+select /*1*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
+user host db command state info
select release_lock("test_lock1");
release_lock("test_lock1")
1
@@ -409,12 +406,11 @@ get_lock("test_lock2", 20)
"Create an event which tries to acquire a mutex. The event locks on the mutex"
create event закачка on schedule every 10 hour do select get_lock("test_lock2", 20);
"Let some time pass to the event starts"
-"Should have only 3 processes: the scheduler, our conn and the locked event"
-show processlist;
-Id User Host db Command Time State Info
-# root localhost events_test Query # NULL show processlist
-# event_scheduler localhost NULL Connect # Sleeping NULL
-# root localhost events_test Connect # User lock select get_lock("test_lock2", 20)
+"Should have only 2 processes: the scheduler and the locked event"
+select /*1*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
+user host db command state info
+event_scheduler localhost NULL Connect Sleeping NULL
+root localhost events_test Connect User lock select get_lock("test_lock2", 20)
"Release the mutex, the event worker should finish."
select release_lock("test_lock2");
release_lock("test_lock2")
@@ -425,23 +421,21 @@ select get_lock("test_lock2_1", 20);
get_lock("test_lock2_1", 20)
1
create event закачка21 on schedule every 10 hour do select get_lock("test_lock2_1", 20);
-"Should see 2 processes, one locked on get_lock("
+"Should see 1 process, locked on get_lock("
"Shutting down the scheduler, it should wait for the running event"
set global event_scheduler=0;
-"Should have only 3 processes: the scheduler, our conn and the locked event"
-show processlist;
-Id User Host db Command Time State Info
-# root localhost events_test Query # NULL show processlist
-# event_scheduler localhost NULL Connect # Sleeping NULL
-# root localhost events_test Connect # User lock select get_lock("test_lock2_1", 20)
+"Should have only 2 processes: the scheduler and the locked event"
+select /*4*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
+user host db command state info
+event_scheduler localhost NULL Connect Sleeping NULL
+root localhost events_test Connect User lock select get_lock("test_lock2_1", 20)
"Release the lock so the child process should finish. Hence the scheduler also"
select release_lock("test_lock2_1");
release_lock("test_lock2_1")
1
-"Should have only our process now:"
-show processlist;
-Id User Host db Command Time State Info
-# root localhost events_test Query # NULL show processlist
+"Should see 0 processes now:"
+select /*5*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
+user host db command state info
drop event закачка21;
create table t_16 (s1 int);
create trigger t_16_bi before insert on t_16 for each row create event e_16 on schedule every 1 second do set @a=5;
diff --git a/mysql-test/r/events_bugs.result b/mysql-test/r/events_bugs.result
index a8d4bd19576..ef1ccfadecb 100644
--- a/mysql-test/r/events_bugs.result
+++ b/mysql-test/r/events_bugs.result
@@ -5,10 +5,10 @@ CREATE EVENT Lower_case ON SCHEDULE EVERY 2 MINUTE DO SELECT 2;
ERROR HY000: Event 'Lower_case' already exists
DROP EVENT Lower_case;
SET NAMES cp1251;
-CREATE EVENT долен_регистър_1251 ON SCHEDULE EVERY 1 YEAR DO SELECT 100;
-CREATE EVENT ДоЛеГ_регистър_1251 ON SCHEDULE EVERY 2 YEAR DO SELECT 200;
+CREATE EVENT долен_регистър_1251 ON SCHEDULE EVERY 1 YEAR DO SELECT 100;
+CREATE EVENT ДоЛеН_регистър_1251 ON SCHEDULE EVERY 2 YEAR DO SELECT 200;
ERROR HY000: Event 'ДоЛеН_регистър_1251' already exists
-DROP EVENT ДоЛеГ_регистър_1251;
+DROP EVENT ДоЛеН_регистър_1251;
SET NAMES utf8;
CREATE EVENT долен_регистър_утф8 ON SCHEDULE EVERY 3 YEAR DO SELECT 300;
CREATE EVENT ДОЛЕН_регистър_утф8 ON SCHEDULE EVERY 4 YEAR DO SELECT 400;
@@ -50,11 +50,10 @@ select get_lock('test_bug16407', 60);
drop table "hashed_num";
end|
"Now if everything is fine the event has compiled and is locked
-show processlist;
-Id User Host db Command Time State Info
-# root localhost events_test Query # NULL show processlist
-# event_scheduler localhost NULL Connect # Sleeping NULL
-# root localhost events_test Connect # User lock select get_lock('test_bug16407', 60)
+select /*1*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
+user host db command state info
+event_scheduler localhost NULL Connect Sleeping NULL
+root localhost events_test Connect User lock select get_lock('test_bug16407', 60)
select release_lock('test_bug16407');
release_lock('test_bug16407')
1
@@ -75,7 +74,7 @@ create table events_smode_test(ev_name char(10), a date) engine=myisam;
"This should never insert something"
create event ee_16407_2 on schedule every 60 second do
begin
-select get_lock('ee_16407_2', 60);
+select get_lock('ee_16407_2', 60) /*ee_16407_2*/;
insert into events_test.events_smode_test values('ee_16407_2','1980-19-02');
end|
insert into events_smode_test values ('test','1980-19-02')|
@@ -83,7 +82,7 @@ ERROR 22007: Incorrect date value: '1980-19-02' for column 'a' at row 1
"This is ok"
create event ee_16407_3 on schedule every 60 second do
begin
-select get_lock('ee_16407_2', 60);
+select get_lock('ee_16407_2', 60) /*ee_16407_3*/;
insert into events_test.events_smode_test values ('ee_16407_3','1980-02-19');
insert into events_test.events_smode_test values ('ee_16407_3','1980-02-29');
end|
@@ -91,7 +90,7 @@ set sql_mode=""|
"This will insert rows but they will be truncated"
create event ee_16407_4 on schedule every 60 second do
begin
-select get_lock('ee_16407_2', 60);
+select get_lock('ee_16407_2', 60) /*ee_16407_4*/;
insert into events_test.events_smode_test values ('ee_16407_4','10-11-1956');
end|
select event_schema, event_name, sql_mode from information_schema.events order by event_schema, event_name;
@@ -104,20 +103,18 @@ select get_lock('ee_16407_2', 60);
get_lock('ee_16407_2', 60)
1
set global event_scheduler= 1;
-show processlist;
-Id User Host db Command Time State Info
-# root localhost events_test Query # NULL show processlist
-# event_scheduler localhost NULL Connect # Sleeping NULL
-# root localhost events_test Connect # User lock select get_lock('ee_16407_2', 60)
-# root localhost events_test Connect # User lock select get_lock('ee_16407_2', 60)
-# root localhost events_test Connect # User lock select get_lock('ee_16407_2', 60)
+select /*2*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
+user host db command state info
+event_scheduler localhost NULL Connect Sleeping NULL
+root localhost events_test Connect User lock select get_lock('ee_16407_2', 60) /*ee_16407_2*/
+root localhost events_test Connect User lock select get_lock('ee_16407_2', 60) /*ee_16407_3*/
+root localhost events_test Connect User lock select get_lock('ee_16407_2', 60) /*ee_16407_4*/
select release_lock('ee_16407_2');
release_lock('ee_16407_2')
1
-show processlist;
-Id User Host db Command Time State Info
-# root localhost events_test Query # NULL show processlist
-# event_scheduler localhost NULL Connect # Sleeping NULL
+select /*3*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
+user host db command state info
+event_scheduler localhost NULL Connect Sleeping NULL
set global event_scheduler= 0;
select * from events_smode_test order by ev_name, a;
ev_name a
@@ -140,12 +137,12 @@ create procedure ee_16407_5_pendant() begin insert into events_test.events_smode
create procedure ee_16407_6_pendant() begin insert into events_test.events_smode_test values('ee_16407_6','2004-02-29'); end|
create event ee_16407_5 on schedule every 60 second do
begin
-select get_lock('ee_16407_5', 60);
+select get_lock('ee_16407_5', 60) /*ee_16407_5*/;
call events_test.ee_16407_5_pendant();
end|
create event ee_16407_6 on schedule every 60 second do
begin
-select get_lock('ee_16407_5', 60);
+select get_lock('ee_16407_5', 60) /*ee_16407_6*/;
call events_test.ee_16407_6_pendant();
end|
set sql_mode='ansi';
@@ -153,19 +150,19 @@ select get_lock('ee_16407_5', 60);
get_lock('ee_16407_5', 60)
1
set global event_scheduler= 1;
-show processlist;
-Id User Host db Command Time State Info
-# root localhost events_test Query # NULL show processlist
-# event_scheduler localhost NULL Connect # Sleeping NULL
-# root localhost events_test Connect # User lock select get_lock('ee_16407_5', 60)
-# root localhost events_test Connect # User lock select get_lock('ee_16407_5', 60)
+"Should have 2 locked processes"
+select /*4*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
+user host db command state info
+event_scheduler localhost NULL Connect Sleeping NULL
+root localhost events_test Connect User lock select get_lock('ee_16407_5', 60) /*ee_16407_5*/
+root localhost events_test Connect User lock select get_lock('ee_16407_5', 60) /*ee_16407_6*/
select release_lock('ee_16407_5');
release_lock('ee_16407_5')
1
-show processlist;
-Id User Host db Command Time State Info
-# root localhost events_test Query # NULL show processlist
-# event_scheduler localhost NULL Connect # Sleeping NULL
+"Should have 0 processes locked"
+select /*5*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
+user host db command state info
+event_scheduler localhost NULL Connect Sleeping NULL
select * from events_smode_test order by ev_name, a;
ev_name a
ee_16407_6 2004-02-29
diff --git a/mysql-test/r/ndb_condition_pushdown.result b/mysql-test/r/ndb_condition_pushdown.result
index bdeff437f6f..3fbc9a38f60 100644
--- a/mysql-test/r/ndb_condition_pushdown.result
+++ b/mysql-test/r/ndb_condition_pushdown.result
@@ -1766,5 +1766,21 @@ select * from t3 left join t4 on t4.attr2 = t3.attr2 where t4.attr1 > 1 and t4.a
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
1 SIMPLE t4 ALL NULL NULL NULL NULL 6 Using where
+create table t5 (a int primary key auto_increment, b tinytext not null)
+engine = ndb;
+insert into t5 (b) values ('jonas'), ('jensing'), ('johan');
+set engine_condition_pushdown = off;
+select * from t5 where b like '%jo%' order by a;
+a b
+1 jonas
+3 johan
+set engine_condition_pushdown = on;
+explain select * from t5 where b like '%jo%';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t5 ALL NULL NULL NULL NULL 3 Using where
+select * from t5 where b like '%jo%' order by a;
+a b
+1 jonas
+3 johan
set engine_condition_pushdown = @old_ecpd;
-DROP TABLE t1,t2,t3,t4;
+DROP TABLE t1,t2,t3,t4,t5;
diff --git a/mysql-test/r/ndb_insert.result b/mysql-test/r/ndb_insert.result
index 1da958ef0f7..3f9a5d7c6c5 100644
--- a/mysql-test/r/ndb_insert.result
+++ b/mysql-test/r/ndb_insert.result
@@ -629,7 +629,23 @@ DELETE FROM t1;
CREATE UNIQUE INDEX ai ON t1(a);
INSERT IGNORE INTO t1 VALUES (1);
INSERT IGNORE INTO t1 VALUES (1);
+INSERT IGNORE INTO t1 VALUES (NULL),(2);
SELECT * FROM t1 ORDER BY a;
a
+NULL
1
+2
+DROP TABLE t1;
+CREATE TABLE t1(pk INT NOT NULL PRIMARY KEY, a INT, UNIQUE (a)) ENGINE=ndb;
+INSERT IGNORE INTO t1 VALUES (1,1),(2,2),(3,3);
+INSERT IGNORE INTO t1 VALUES (4,NULL),(5,NULL),(6,NULL),(7,4);
+SELECT * FROM t1 ORDER BY pk;
+pk a
+1 1
+2 2
+3 3
+4 NULL
+5 NULL
+6 NULL
+7 4
DROP TABLE t1;
diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result
index 7244dd6ccbf..f5d8cc8f349 100644
--- a/mysql-test/r/partition.result
+++ b/mysql-test/r/partition.result
@@ -718,4 +718,105 @@ CALL test.p1(13);
Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
drop table t1;
+create table t1 (f1 integer) partition by range(f1)
+(partition p1 values less than (0), partition p2 values less than (10));
+insert into t1 set f1 = null;
+select * from t1 where f1 is null;
+f1
+NULL
+explain partitions select * from t1 where f1 is null;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1 system NULL NULL NULL NULL 1
+drop table t1;
+create table t1 (f1 integer) partition by list(f1)
+(partition p1 values in (1), partition p2 values in (null));
+insert into t1 set f1 = null;
+insert into t1 set f1 = 1;
+select * from t1 where f1 is null or f1 = 1;
+f1
+1
+NULL
+drop table t1;
+create table t1 (f1 smallint)
+partition by list (f1) (partition p0 values in (null));
+insert into t1 values (null);
+select * from t1 where f1 is null;
+f1
+NULL
+drop table t1;
+create table t1 (f1 smallint)
+partition by range (f1) (partition p0 values less than (0));
+insert into t1 values (null);
+select * from t1 where f1 is null;
+f1
+NULL
+drop table t1;
+create table t1 (f1 integer) partition by list(f1)
+(
+partition p1 values in (1),
+partition p2 values in (NULL),
+partition p3 values in (2),
+partition p4 values in (3),
+partition p5 values in (4)
+);
+insert into t1 values (1),(2),(3),(4),(null);
+select * from t1 where f1 < 3;
+f1
+1
+2
+explain partitions select * from t1 where f1 < 3;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1,p3 ALL NULL NULL NULL NULL 2 Using where
+select * from t1 where f1 is null;
+f1
+NULL
+explain partitions select * from t1 where f1 is null;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2 system NULL NULL NULL NULL 1
+drop table t1;
+create table t1 (f1 int) partition by list(f1 div 2)
+(
+partition p1 values in (1),
+partition p2 values in (NULL),
+partition p3 values in (2),
+partition p4 values in (3),
+partition p5 values in (4)
+);
+insert into t1 values (2),(4),(6),(8),(null);
+select * from t1 where f1 < 3;
+f1
+2
+explain partitions select * from t1 where f1 < 3;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p1,p2,p3,p4,p5 ALL NULL NULL NULL NULL 5 Using where
+select * from t1 where f1 is null;
+f1
+NULL
+explain partitions select * from t1 where f1 is null;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p2 system NULL NULL NULL NULL 1
+drop table t1;
+create table t1 (a int) partition by LIST(a) (
+partition pn values in (NULL),
+partition p0 values in (0),
+partition p1 values in (1),
+partition p2 values in (2)
+);
+insert into t1 values (NULL),(0),(1),(2);
+select * from t1 where a is null or a < 2;
+a
+NULL
+0
+1
+explain partitions select * from t1 where a is null or a < 2;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pn,p0,p1 ALL NULL NULL NULL NULL 3 Using where
+select * from t1 where a is null or a < 0 or a > 1;
+a
+NULL
+2
+explain partitions select * from t1 where a is null or a < 0 or a > 1;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 pn,p2 ALL NULL NULL NULL NULL 2 Using where
+drop table t1;
End of 5.1 tests
diff --git a/mysql-test/r/rpl_ndb_sp007.result b/mysql-test/r/rpl_ndb_sp007.result
deleted file mode 100644
index 6e84f9940ef..00000000000
--- a/mysql-test/r/rpl_ndb_sp007.result
+++ /dev/null
@@ -1,50 +0,0 @@
-stop slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-reset master;
-reset slave;
-drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
-start slave;
-DROP PROCEDURE IF EXISTS test.p1;
-DROP TABLE IF EXISTS test.t1;
-CREATE PROCEDURE test.p1(IN i INT)
-BEGIN
-DECLARE CONTINUE HANDLER FOR sqlexception BEGIN END;
-DROP TABLE IF EXISTS test.t1;
-CREATE TABLE test.t1 (num INT,PRIMARY KEY(num))ENGINE=NDBCLUSTER;
-START TRANSACTION;
-INSERT INTO test.t1 VALUES(i);
-savepoint t1_save;
-INSERT INTO test.t1 VALUES (14);
-ROLLBACK to savepoint t1_save;
-COMMIT;
-END|
-
-< ---- Master selects-- >
--------------------------
-CALL test.p1(12);
-Warnings:
-Note 1051 Unknown table 't1'
-SELECT * FROM test.t1;
-num
-12
-
-< ---- Slave selects-- >
-------------------------
-SELECT * FROM test.t1;
-num
-12
-
-< ---- Master selects-- >
--------------------------
-CALL test.p1(13);
-SELECT * FROM test.t1;
-num
-13
-
-< ---- Slave selects-- >
-------------------------
-SELECT * FROM test.t1;
-num
-13
-DROP PROCEDURE IF EXISTS test.p1;
-DROP TABLE IF EXISTS test.t1;
diff --git a/mysql-test/t/cast.test b/mysql-test/t/cast.test
index dc7f695e38e..7e09f44397c 100644
--- a/mysql-test/t/cast.test
+++ b/mysql-test/t/cast.test
@@ -165,6 +165,15 @@ select cast(repeat('1',20) as signed);
#
select cast(1.0e+300 as signed int);
+#
+# Bugs: #15098: CAST(column double TO signed int), wrong result
+#
+CREATE TABLE t1 (f1 double);
+INSERT INTO t1 SET f1 = -1.0e+30 ;
+INSERT INTO t1 SET f1 = +1.0e+30 ;
+SELECT f1 AS double_val, CAST(f1 AS SIGNED INT) AS cast_val FROM t1;
+DROP TABLE t1;
+
# End of 4.1 tests
diff --git a/mysql-test/t/csv.test b/mysql-test/t/csv.test
index 916a2132deb..08237701d3a 100644
--- a/mysql-test/t/csv.test
+++ b/mysql-test/t/csv.test
@@ -1388,6 +1388,99 @@ UNLOCK TABLES;
DROP TABLE test_concurrent_insert;
#
+# Test REPAIR/CHECK TABLE (5.1)
+#
+
+# Check that repair on the newly created table works fine
+
+CREATE TABLE test_repair_table ( val integer ) ENGINE = CSV;
+
+CHECK TABLE test_repair_table;
+REPAIR TABLE test_repair_table;
+
+DROP TABLE test_repair_table;
+
+#
+# Check autorepair. Here we also check that we can work w/o metafile
+# restore the meta-file
+#
+
+CREATE TABLE test_repair_table2 ( val integer ) ENGINE = CSV;
+--exec rm $MYSQLTEST_VARDIR/master-data/test/test_repair_table2.CSM
+
+# Should give a warning and perform autorepair. We also disable ps-protocol
+# here, as mysql-test eats up warnings in ps-protocol mode
+
+--disable_ps_protocol
+SELECT * from test_repair_table2;
+--enable_ps_protocol
+# this should work ok, as the table is already repaired
+SELECT * from test_repair_table2;
+# check that the metafile appeared again. chop the path to it
+--exec ls $MYSQLTEST_VARDIR/master-data/test/test_repair_table2.CSM | perl -pi -e "s/.*\///"
+CHECK TABLE test_repair_table2;
+DROP TABLE test_repair_table2;
+
+
+# Corrupt csv file and see if we can repair it
+CREATE TABLE test_repair_table3 ( val integer ) ENGINE = CSV;
+--exec echo -n -e \"1\"\\n\"4\"\\n\"3 > $MYSQLTEST_VARDIR/master-data/test/test_repair_table3.CSV
+CHECK TABLE test_repair_table3;
+REPAIR TABLE test_repair_table3;
+SELECT * FROM test_repair_table3;
+DROP TABLE test_repair_table3;
+
+# Test with more sophisticated table
+
+CREATE TABLE test_repair_table4 (
+ num int not null,
+ magic_no int(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+ company_name char(30) DEFAULT '' NOT NULL,
+ founded char(4) DEFAULT '' NOT NULL
+) ENGINE = CSV;
+
+--exec rm $MYSQLTEST_VARDIR/master-data/test/test_repair_table4.CSM
+--disable_ps_protocol
+SELECT * FROM test_repair_table4;
+--enable_ps_protocol
+SELECT * FROM test_repair_table4;
+CHECK TABLE test_repair_table4;
+
+INSERT INTO test_repair_table4 VALUES (2,101,'SAP','1972');
+INSERT INTO test_repair_table4 VALUES (1,101,'Microsoft','1978');
+INSERT INTO test_repair_table4 VALUES (2,101,'MySQL','1995');
+
+# list table content
+SELECT * FROM test_repair_table4;
+CHECK TABLE test_repair_table4;
+REPAIR TABLE test_repair_table4;
+# check that nothing changed
+SELECT * FROM test_repair_table4;
+# verify that check/repair did not corrupt the table itself
+CHECK TABLE test_repair_table4;
+REPAIR TABLE test_repair_table4;
+SELECT * FROM test_repair_table4;
+DROP TABLE test_repair_table4;
+
+# Run CHECK/REPAIR on the CSV file with a single row, which misses a column.
+
+CREATE TABLE test_repair_table5 (
+ num int not null,
+ magic_no int(4) unsigned zerofill DEFAULT '0000' NOT NULL,
+ company_name char(30) DEFAULT '' NOT NULL,
+ founded char(4) DEFAULT '' NOT NULL
+) ENGINE = CSV;
+
+# Corrupt a table -- put a file with wrong # of columns
+--exec echo -n -e \"1\",\"101\",\"IBM\"\\n > $MYSQLTEST_VARDIR/master-data/test/test_repair_table5.CSV
+
+CHECK TABLE test_repair_table5;
+REPAIR TABLE test_repair_table5;
+SELECT * FROM test_repair_table5;
+DROP TABLE test_repair_table5;
+
+
+#
# BUG#13406 - incorrect amount of "records deleted"
#
diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def
index c62c858bd00..9258f984a18 100644
--- a/mysql-test/t/disabled.def
+++ b/mysql-test/t/disabled.def
@@ -18,25 +18,30 @@ ndb_autodiscover2 : Needs to be fixed w.r.t binlog
#ndb_binlog_basic : Results are not deterministic, Tomas will fix
ndb_binlog_ddl_multi : Bug #18490 cluster: binlog events lost on remote mysqld (asynchronous)
#ndb_binlog_multi : Results are not deterministic, Tomas will fix
+ndb_cache2 : Bug #18597
+ndb_gis : Bug #18600
ndb_load : Bug#17233
+ndb_partition_list : Bug#18598
partition_03ndb : Bug#16385
ps_7ndb : dbug assert in RBR mode when executing test suite
#rpl_bit_npk : Bug#13418
#rpl_ddl : Bug#15963 SBR does not show "Definer" correctly
+rpl_ndb_dd_basic : Bug #18569
rpl_ndb_2innodb : assertion in get_parts_for_update()
rpl_ndb_2myisam : assertion in get_parts_for_update()
rpl_ndb_auto_inc : Bug#17086
-#rpl_ndb_basic : Bug#16228 [IN REVIEW]
+rpl_ndb_basic : Bug #18592
#rpl_ndb_blob : interferes with following tests, causing hang
#rpl_ndb_blob2 : interferes with following tests, causing hang
rpl_ndb_ddl : master hangs
#rpl_ndb_delete_nowhere : Bug#17400: delete & update of rows in table without pk fails
+rpl_ndb_insert_ignore : Bug #18567
rpl_ndb_innodb2ndb : Bug#18261: Cluster Replication: tests rpl_ndb_xxx2ndb fails
rpl_ndb_myisam2ndb : Bug#18261: Cluster Replication: tests rpl_ndb_xxx2ndb fails
rpl_ndb_log : result not deterministic, TBD if should remove
rpl_ndb_relay_space : Bug#16993
#rpl_ndb_multi_update3 : Bug#17400: delete & update of rows in table without pk fails
-rpl_ndb_sp007 : Bug #18565
+rpl_switch_stm_row_mixed : Bug #18590
rpl_row_inexist_tbl : Disabled since patch makes this test wait forever
rpl_sp : Bug#16456
rpl_until : Unstable test case, bug#15886
diff --git a/mysql-test/t/events.test b/mysql-test/t/events.test
index 6036dcb3000..755d4e7775b 100644
--- a/mysql-test/t/events.test
+++ b/mysql-test/t/events.test
@@ -341,9 +341,8 @@ create event закачка on schedule every 10 hour do select get_lock("test_l
--echo "Should return 1 row"
select definer, name, db from mysql.event;
---echo "Should be only 1 process"
---replace_column 1 # 6 #
-show processlist;
+--echo "Should be 0 processes"
+select /*1*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
select release_lock("test_lock1");
drop event закачка;
--echo "Should have 0 events"
@@ -359,9 +358,8 @@ select get_lock("test_lock2", 20);
create event закачка on schedule every 10 hour do select get_lock("test_lock2", 20);
--echo "Let some time pass to the event starts"
--sleep 2
---echo "Should have only 3 processes: the scheduler, our conn and the locked event"
---replace_column 1 # 6 #
-show processlist;
+--echo "Should have only 2 processes: the scheduler and the locked event"
+select /*1*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
--echo "Release the mutex, the event worker should finish."
select release_lock("test_lock2");
drop event закачка;
@@ -378,21 +376,18 @@ set global event_scheduler=1;
select get_lock("test_lock2_1", 20);
create event закачка21 on schedule every 10 hour do select get_lock("test_lock2_1", 20);
--sleep 1
---echo "Should see 2 processes, one locked on get_lock("
-#--replace_column 1 # 6 #
-#show processlist;
+--echo "Should see 1 process, locked on get_lock("
+#select /*3*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
--echo "Shutting down the scheduler, it should wait for the running event"
set global event_scheduler=0;
--sleep 1
---echo "Should have only 3 processes: the scheduler, our conn and the locked event"
---replace_column 1 # 6 #
-show processlist;
+--echo "Should have only 2 processes: the scheduler and the locked event"
+select /*4*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
--echo "Release the lock so the child process should finish. Hence the scheduler also"
select release_lock("test_lock2_1");
--sleep 1
---echo "Should have only our process now:"
---replace_column 1 # 6 #
-show processlist;
+--echo "Should see 0 processes now:"
+select /*5*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
drop event закачка21;
####
@@ -441,7 +436,7 @@ drop event e1;
##select get_lock("test_lock3", 20);
##create event закачка on schedule every 10 hour do select get_lock("test_lock3", 20);
##select sleep(2);
-##show processlist;
+##select /*7*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
##drop event закачка;
##select release_lock("test_lock3");
@@ -451,15 +446,13 @@ drop event e1;
##select get_lock("test_lock4", 20);
##create event закачка4 on schedule every 1 second do select get_lock("test_lock4", 20);
##select sleep(3);
-##--replace_column 1 # 6 #
-##show processlist;
+##select /*8*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
##drop event закачка4;
##select release_lock("test_lock4");
##set global event_scheduler=0;
##select sleep(2);
-##--replace_column 1 # 6 #
-##show processlist;
+##select /*9*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
##select count(*) from mysql.event;
drop database events_test;
diff --git a/mysql-test/t/events_bugs.test b/mysql-test/t/events_bugs.test
index 690f9f3df6f..3f339ff0398 100644
--- a/mysql-test/t/events_bugs.test
+++ b/mysql-test/t/events_bugs.test
@@ -77,8 +77,7 @@ end|
delimiter ;|
--sleep 1
--echo "Now if everything is fine the event has compiled and is locked
---replace_column 1 # 6 #
-show processlist;
+select /*1*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
select release_lock('test_bug16407');
set global event_scheduler= 0;
select event_schema, event_name, sql_mode from information_schema.events order by event_schema, event_name;
@@ -95,7 +94,7 @@ create table events_smode_test(ev_name char(10), a date) engine=myisam;
delimiter |;
create event ee_16407_2 on schedule every 60 second do
begin
- select get_lock('ee_16407_2', 60);
+ select get_lock('ee_16407_2', 60) /*ee_16407_2*/;
insert into events_test.events_smode_test values('ee_16407_2','1980-19-02');
end|
--error ER_TRUNCATED_WRONG_VALUE
@@ -103,7 +102,7 @@ insert into events_smode_test values ('test','1980-19-02')|
--echo "This is ok"
create event ee_16407_3 on schedule every 60 second do
begin
- select get_lock('ee_16407_2', 60);
+ select get_lock('ee_16407_2', 60) /*ee_16407_3*/;
insert into events_test.events_smode_test values ('ee_16407_3','1980-02-19');
insert into events_test.events_smode_test values ('ee_16407_3','1980-02-29');
end|
@@ -111,7 +110,7 @@ set sql_mode=""|
--echo "This will insert rows but they will be truncated"
create event ee_16407_4 on schedule every 60 second do
begin
- select get_lock('ee_16407_2', 60);
+ select get_lock('ee_16407_2', 60) /*ee_16407_4*/;
insert into events_test.events_smode_test values ('ee_16407_4','10-11-1956');
end|
delimiter ;|
@@ -120,12 +119,10 @@ set sql_mode="ansi";
select get_lock('ee_16407_2', 60);
set global event_scheduler= 1;
--sleep 1
---replace_column 1 # 6 #
-show processlist;
+select /*2*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
select release_lock('ee_16407_2');
--sleep 3
---replace_column 1 # 6 #
-show processlist;
+select /*3*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
set global event_scheduler= 0;
select * from events_smode_test order by ev_name, a;
--echo "OK, last check before we drop them"
@@ -143,12 +140,12 @@ create procedure ee_16407_5_pendant() begin insert into events_test.events_smode
create procedure ee_16407_6_pendant() begin insert into events_test.events_smode_test values('ee_16407_6','2004-02-29'); end|
create event ee_16407_5 on schedule every 60 second do
begin
- select get_lock('ee_16407_5', 60);
+ select get_lock('ee_16407_5', 60) /*ee_16407_5*/;
call events_test.ee_16407_5_pendant();
end|
create event ee_16407_6 on schedule every 60 second do
begin
- select get_lock('ee_16407_5', 60);
+ select get_lock('ee_16407_5', 60) /*ee_16407_6*/;
call events_test.ee_16407_6_pendant();
end|
delimiter ;|
@@ -156,12 +153,12 @@ set sql_mode='ansi';
select get_lock('ee_16407_5', 60);
set global event_scheduler= 1;
--sleep 1
---replace_column 1 # 6 #
-show processlist;
+--echo "Should have 2 locked processes"
+select /*4*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
select release_lock('ee_16407_5');
--sleep 3
---replace_column 1 # 6 #
-show processlist;
+--echo "Should have 0 processes locked"
+select /*5*/ user, host, db, command, state, info from information_schema.processlist where info is null or info not like '%processlist%' order by info;
select * from events_smode_test order by ev_name, a;
--echo "And here we check one more time before we drop the events"
select event_schema, event_name, sql_mode from information_schema.events order by event_schema, event_name;
diff --git a/mysql-test/t/ndb_condition_pushdown.test b/mysql-test/t/ndb_condition_pushdown.test
index 9f512430085..1e6e105bc61 100644
--- a/mysql-test/t/ndb_condition_pushdown.test
+++ b/mysql-test/t/ndb_condition_pushdown.test
@@ -1639,5 +1639,15 @@ select * from t2 where (attr1 < 2) = (attr2 < 2) order by pk1;
explain
select * from t3 left join t4 on t4.attr2 = t3.attr2 where t4.attr1 > 1 and t4.attr3 < 5 or t4.attr1 is null order by t4.pk1;
+# bug#15722
+create table t5 (a int primary key auto_increment, b tinytext not null)
+engine = ndb;
+insert into t5 (b) values ('jonas'), ('jensing'), ('johan');
+set engine_condition_pushdown = off;
+select * from t5 where b like '%jo%' order by a;
+set engine_condition_pushdown = on;
+explain select * from t5 where b like '%jo%';
+select * from t5 where b like '%jo%' order by a;
+
set engine_condition_pushdown = @old_ecpd;
-DROP TABLE t1,t2,t3,t4;
+DROP TABLE t1,t2,t3,t4,t5;
diff --git a/mysql-test/t/ndb_insert.test b/mysql-test/t/ndb_insert.test
index 4fe847058c4..bf25ca9a133 100644
--- a/mysql-test/t/ndb_insert.test
+++ b/mysql-test/t/ndb_insert.test
@@ -619,7 +619,15 @@ DELETE FROM t1;
CREATE UNIQUE INDEX ai ON t1(a);
INSERT IGNORE INTO t1 VALUES (1);
INSERT IGNORE INTO t1 VALUES (1);
+INSERT IGNORE INTO t1 VALUES (NULL),(2);
SELECT * FROM t1 ORDER BY a;
DROP TABLE t1;
+# Ignore and NULL values
+CREATE TABLE t1(pk INT NOT NULL PRIMARY KEY, a INT, UNIQUE (a)) ENGINE=ndb;
+INSERT IGNORE INTO t1 VALUES (1,1),(2,2),(3,3);
+INSERT IGNORE INTO t1 VALUES (4,NULL),(5,NULL),(6,NULL),(7,4);
+SELECT * FROM t1 ORDER BY pk;
+DROP TABLE t1;
+
# End of 4.1 tests
diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test
index ad01977a458..c3e32e832bf 100644
--- a/mysql-test/t/partition.test
+++ b/mysql-test/t/partition.test
@@ -849,4 +849,79 @@ CALL test.p1(12);
CALL test.p1(13);
drop table t1;
+#
+# Bug#18053 Partitions: crash if null
+# Bug#18070 Partitions: wrong result on WHERE ... IS NULL
+#
+create table t1 (f1 integer) partition by range(f1)
+(partition p1 values less than (0), partition p2 values less than (10));
+insert into t1 set f1 = null;
+select * from t1 where f1 is null;
+explain partitions select * from t1 where f1 is null;
+drop table t1;
+
+create table t1 (f1 integer) partition by list(f1)
+(partition p1 values in (1), partition p2 values in (null));
+insert into t1 set f1 = null;
+insert into t1 set f1 = 1;
+select * from t1 where f1 is null or f1 = 1;
+drop table t1;
+
+create table t1 (f1 smallint)
+partition by list (f1) (partition p0 values in (null));
+insert into t1 values (null);
+select * from t1 where f1 is null;
+drop table t1;
+
+create table t1 (f1 smallint)
+partition by range (f1) (partition p0 values less than (0));
+insert into t1 values (null);
+select * from t1 where f1 is null;
+drop table t1;
+
+create table t1 (f1 integer) partition by list(f1)
+(
+ partition p1 values in (1),
+ partition p2 values in (NULL),
+ partition p3 values in (2),
+ partition p4 values in (3),
+ partition p5 values in (4)
+);
+
+insert into t1 values (1),(2),(3),(4),(null);
+select * from t1 where f1 < 3;
+explain partitions select * from t1 where f1 < 3;
+select * from t1 where f1 is null;
+explain partitions select * from t1 where f1 is null;
+drop table t1;
+
+create table t1 (f1 int) partition by list(f1 div 2)
+(
+ partition p1 values in (1),
+ partition p2 values in (NULL),
+ partition p3 values in (2),
+ partition p4 values in (3),
+ partition p5 values in (4)
+);
+
+insert into t1 values (2),(4),(6),(8),(null);
+select * from t1 where f1 < 3;
+explain partitions select * from t1 where f1 < 3;
+select * from t1 where f1 is null;
+explain partitions select * from t1 where f1 is null;
+drop table t1;
+
+create table t1 (a int) partition by LIST(a) (
+ partition pn values in (NULL),
+ partition p0 values in (0),
+ partition p1 values in (1),
+ partition p2 values in (2)
+);
+insert into t1 values (NULL),(0),(1),(2);
+select * from t1 where a is null or a < 2;
+explain partitions select * from t1 where a is null or a < 2;
+select * from t1 where a is null or a < 0 or a > 1;
+explain partitions select * from t1 where a is null or a < 0 or a > 1;
+drop table t1;
+
--echo End of 5.1 tests
diff --git a/mysql-test/t/partition_mgm_err2.test b/mysql-test/t/partition_mgm_err2.test
index 157ba4d70d2..dd96731ccdd 100644
--- a/mysql-test/t/partition_mgm_err2.test
+++ b/mysql-test/t/partition_mgm_err2.test
@@ -1,6 +1,7 @@
#
# Test of partitions that require symlinks
#
+-- source include/have_partition.inc
-- require r/have_symlink.require
disable_query_log;
diff --git a/mysql-test/t/rpl_ndb_sp007.test b/mysql-test/t/rpl_ndb_sp007.test
deleted file mode 100644
index 767fd105518..00000000000
--- a/mysql-test/t/rpl_ndb_sp007.test
+++ /dev/null
@@ -1,9 +0,0 @@
-#################################
-# Wrapper for rpl_row_sp007.test#
-# These tests have been wrapped #
-# so the same code can be used #
-# For different engines #
-#################################
--- source include/have_ndb.inc
-let $engine_type=NDBCLUSTER;
--- source extra/rpl_tests/rpl_row_sp007.test
diff --git a/sql/cmakelists.txt b/sql/cmakelists.txt
index 82268feba69..0d06a11ebba 100644
--- a/sql/cmakelists.txt
+++ b/sql/cmakelists.txt
@@ -1,5 +1,7 @@
-SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR")
-SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR")
+SET(CMAKE_CXX_FLAGS_DEBUG
+ "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR")
+SET(CMAKE_C_FLAGS_DEBUG
+ "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/extra/yassl/include
@@ -18,7 +20,8 @@ SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc
${CMAKE_SOURCE_DIR}/sql/lex_hash.h
${PROJECT_SOURCE_DIR}/include/mysqld_error.h
${PROJECT_SOURCE_DIR}/include/mysqld_ername.h
- ${PROJECT_SOURCE_DIR}/include/sql_state.h PROPERTIES GENERATED 1)
+ ${PROJECT_SOURCE_DIR}/include/sql_state.h
+ PROPERTIES GENERATED 1)
IF(WITH_ARCHIVE_STORAGE_ENGINE)
ADD_DEFINITIONS(-D WITH_ARCHIVE_STORAGE_ENGINE)
@@ -67,39 +70,50 @@ ENDIF(__NT__)
IF(CYBOZU)
ADD_DEFINITIONS(-D CYBOZU)
ENDIF(CYBOZU)
-CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/sql/handlerton.cc.in ${CMAKE_SOURCE_DIR}/sql/handlerton.cc @ONLY)
+CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/sql/handlerton.cc.in
+ ${CMAKE_SOURCE_DIR}/sql/handlerton.cc @ONLY)
-ADD_DEFINITIONS(-DHAVE_ROW_BASED_REPLICATION -DMYSQL_SERVER -D_CONSOLE -DHAVE_DLOPEN)
+ADD_DEFINITIONS(-DHAVE_ROW_BASED_REPLICATION -DMYSQL_SERVER
+ -D_CONSOLE -DHAVE_DLOPEN)
SEPARATE_ARGUMENTS(mysql_se_ha_src)
-ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc discover.cc ../libmysql/errmsg.c field.cc
- field_conv.cc filesort.cc gstream.cc ha_heap.cc ha_myisam.cc ha_myisammrg.cc
- ${mysql_se_ha_src} handler.cc hash_filo.cc hash_filo.h hostname.cc
- init.cc item.cc item_buff.cc item_cmpfunc.cc item_create.cc item_func.cc item_geofunc.cc
- item_row.cc item_strfunc.cc item_subselect.cc item_sum.cc item_timefunc.cc item_uniq.cc
- key.cc log.cc lock.cc log_event.cc message.rc message.h mf_iocache.cc
- my_decimal.cc ../sql-common/my_time.c
- ../storage/myisammrg/myrg_rnext_same.c mysqld.cc net_serv.cc nt_servc.cc nt_servc.h opt_range.cc opt_range.h
- opt_sum.cc ../sql-common/pack.c parse_file.cc password.c procedure.cc protocol.cc records.cc
- repl_failsafe.cc rpl_filter.cc set_var.cc slave.cc sp.cc sp_cache.cc sp_head.cc
- sp_pcontext.cc sp_rcontext.cc spatial.cc sql_acl.cc sql_analyse.cc sql_base.cc sql_cache.cc
- sql_class.cc sql_client.cc sql_crypt.cc sql_crypt.h sql_cursor.cc sql_db.cc sql_delete.cc
- sql_derived.cc sql_do.cc sql_error.cc sql_handler.cc sql_help.cc sql_insert.cc
- sql_lex.cc sql_list.cc sql_load.cc sql_manager.cc sql_map.cc sql_parse.cc sql_partition.cc
- sql_plugin.cc sql_prepare.cc sql_rename.cc sql_repl.cc sql_select.cc sql_show.cc
- sql_state.c sql_string.cc sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc
- sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc time.cc tztime.cc
- uniques.cc unireg.cc item_xmlfunc.cc rpl_tblmap.cc sql_binlog.cc event_executor.cc
- event_timed.cc sql_tablespace.cc event.cc ../sql-common/my_user.c partition_info.cc
- ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
- ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
- ${PROJECT_SOURCE_DIR}/include/mysqld_error.h
- ${PROJECT_SOURCE_DIR}/include/mysqld_ername.h
- ${PROJECT_SOURCE_DIR}/include/sql_state.h
- ${PROJECT_SOURCE_DIR}/include/mysql_version.h
- ${PROJECT_SOURCE_DIR}/sql/handlerton.cc
- ${PROJECT_SOURCE_DIR}/sql/lex_hash.h)
-TARGET_LINK_LIBRARIES(mysqld heap myisam myisammrg mysys yassl zlib dbug yassl taocrypt strings vio regex wsock32)
+ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc
+ discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
+ filesort.cc gstream.cc ha_heap.cc ha_myisam.cc ha_myisammrg.cc
+ ${mysql_se_ha_src} handler.cc hash_filo.cc hash_filo.h
+ hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc
+ item_create.cc item_func.cc item_geofunc.cc item_row.cc
+ item_strfunc.cc item_subselect.cc item_sum.cc item_timefunc.cc
+ item_uniq.cc key.cc log.cc lock.cc log_event.cc message.rc
+ message.h mf_iocache.cc my_decimal.cc ../sql-common/my_time.c
+ ../storage/myisammrg/myrg_rnext_same.c mysqld.cc net_serv.cc
+ nt_servc.cc nt_servc.h opt_range.cc opt_range.h opt_sum.cc
+ ../sql-common/pack.c parse_file.cc password.c procedure.cc
+ protocol.cc records.cc repl_failsafe.cc rpl_filter.cc set_var.cc
+ slave.cc sp.cc sp_cache.cc sp_head.cc sp_pcontext.cc
+ sp_rcontext.cc spatial.cc sql_acl.cc sql_analyse.cc sql_base.cc
+ sql_cache.cc sql_class.cc sql_client.cc sql_crypt.cc sql_crypt.h
+ sql_cursor.cc sql_db.cc sql_delete.cc sql_derived.cc sql_do.cc
+ sql_error.cc sql_handler.cc sql_help.cc sql_insert.cc sql_lex.cc
+ sql_list.cc sql_load.cc sql_manager.cc sql_map.cc sql_parse.cc
+ sql_partition.cc sql_plugin.cc sql_prepare.cc sql_rename.cc
+ sql_repl.cc sql_select.cc sql_show.cc sql_state.c sql_string.cc
+ sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc
+ sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc
+ time.cc tztime.cc uniques.cc unireg.cc item_xmlfunc.cc
+ rpl_tblmap.cc sql_binlog.cc event_executor.cc event_timed.cc
+ sql_tablespace.cc event.cc ../sql-common/my_user.c
+ partition_info.cc
+ ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
+ ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h
+ ${PROJECT_SOURCE_DIR}/include/mysqld_error.h
+ ${PROJECT_SOURCE_DIR}/include/mysqld_ername.h
+ ${PROJECT_SOURCE_DIR}/include/sql_state.h
+ ${PROJECT_SOURCE_DIR}/include/mysql_version.h
+ ${PROJECT_SOURCE_DIR}/sql/handlerton.cc
+ ${PROJECT_SOURCE_DIR}/sql/lex_hash.h)
+TARGET_LINK_LIBRARIES(mysqld heap myisam myisammrg mysys yassl zlib dbug yassl
+ taocrypt strings vio regex wsock32)
IF(WITH_ARCHIVE_STORAGE_ENGINE)
TARGET_LINK_LIBRARIES(mysqld archive)
ENDIF(WITH_ARCHIVE_STORAGE_ENGINE)
@@ -119,7 +133,8 @@ ADD_DEPENDENCIES(mysqld GenError)
ADD_CUSTOM_COMMAND(
SOURCE ${PROJECT_SOURCE_DIR}/sql/sql_yacc.yy
OUTPUT ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc
- COMMAND bison.exe ARGS -y -p MYSQL --defines=sql_yacc.h --output=sql_yacc.cc sql_yacc.yy
+ COMMAND bison.exe ARGS -y -p MYSQL --defines=sql_yacc.h
+ --output=sql_yacc.cc sql_yacc.yy
DEPENDS ${PROJECT_SOURCE_DIR}/sql/sql_yacc.yy)
ADD_CUSTOM_COMMAND(
diff --git a/sql/field.cc b/sql/field.cc
index 65c0d1b9397..708b239b494 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -4245,6 +4245,11 @@ longlong Field_double::val_int(void)
else
#endif
doubleget(j,ptr);
+ /* Check whether we fit into longlong range */
+ if (j <= (double) LONGLONG_MIN)
+ return (longlong) LONGLONG_MIN;
+ if (j >= (double) (ulonglong) LONGLONG_MAX)
+ return (longlong) LONGLONG_MAX;
return (longlong) rint(j);
}
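
The field.cc hunk above clamps out-of-range doubles before converting them to longlong. A minimal standalone sketch of the same saturation idea, using the standard LLONG_MIN/LLONG_MAX limits in place of the server's LONGLONG_* macros:

#include <climits>
#include <cmath>

// Saturate a double to the long long range before rounding, mirroring the
// check added to Field_double::val_int(). Without the clamp, the final cast
// would be undefined for out-of-range values.
static long long double_to_longlong_clamped(double j)
{
  if (j <= static_cast<double>(LLONG_MIN))
    return LLONG_MIN;
  if (j >= static_cast<double>(static_cast<unsigned long long>(LLONG_MAX)))
    return LLONG_MAX;
  return static_cast<long long>(std::rint(j));
}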
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 96f760a7a44..02af9fb6918 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -524,7 +524,6 @@ THR_LOCK_DATA **ha_heap::store_lock(THD *thd,
int ha_heap::delete_table(const char *name)
{
- char buff[FN_REFLEN];
int error= heap_delete_table(name);
return error == ENOENT ? 0 : error;
}
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 795da2bdec2..1a174867189 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -74,6 +74,8 @@ handlerton ndbcluster_hton = {
DB_TYPE_NDBCLUSTER,
ndbcluster_init,
~(uint)0, /* slot */
+ /* below are initialized by name in ndbcluster_init() */
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
};
static handler *ndbcluster_create_handler(TABLE_SHARE *table)
@@ -2336,7 +2338,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
if (generate_scan_filter(m_cond_stack, op))
DBUG_RETURN(ndb_err(trans));
- if (res= define_read_attrs(buf, op))
+ if ((res= define_read_attrs(buf, op)))
{
DBUG_RETURN(res);
}
@@ -3395,6 +3397,7 @@ int ha_ndbcluster::rnd_pos(byte *buf, byte *pos)
The partition id has been fetched from ndb
and has been stored directly after the hidden key
*/
+ DBUG_DUMP("key+part", (char *)pos, key_length);
key_length= ref_length - sizeof(m_part_id);
part_spec.start_part= part_spec.end_part= *(pos + key_length);
}
@@ -3411,6 +3414,7 @@ int ha_ndbcluster::rnd_pos(byte *buf, byte *pos)
}
DBUG_PRINT("info", ("partition id %u", part_spec.start_part));
}
+ DBUG_DUMP("key", (char *)pos, key_length);
DBUG_RETURN(pk_read(pos, key_length, buf, part_spec.start_part));
}
}
@@ -3496,7 +3500,10 @@ void ha_ndbcluster::position(const byte *record)
#endif
memcpy(ref, m_ref, key_length);
}
-
+#ifndef DBUG_OFF
+ if (table_share->primary_key == MAX_KEY && m_use_partition_function)
+ DBUG_DUMP("key+part", (char*)ref, key_length+sizeof(m_part_id));
+#endif
DBUG_DUMP("ref", (char*)ref, key_length);
DBUG_VOID_RETURN;
}
@@ -8194,7 +8201,12 @@ void ndb_serialize_cond(const Item *item, void *arg)
context->expecting_field_result(INT_RESULT))
: true)) &&
// Bit fields not yet supported in scan filter
- type != MYSQL_TYPE_BIT)
+ type != MYSQL_TYPE_BIT &&
+ // No BLOB support in scan filter
+ type != MYSQL_TYPE_TINY_BLOB &&
+ type != MYSQL_TYPE_MEDIUM_BLOB &&
+ type != MYSQL_TYPE_LONG_BLOB &&
+ type != MYSQL_TYPE_BLOB)
{
const NDBCOL *col= tab->getColumn(field->field_name);
DBUG_ASSERT(col);
@@ -9235,7 +9247,7 @@ char* ha_ndbcluster::get_tablespace_name(THD *thd)
{
NdbDictionary::Tablespace ts= ndbdict->getTablespace(id);
ndberr= ndbdict->getNdbError();
- if(ndberr.classification != ndberror_cl_none)
+ if(ndberr.classification != NdbError::NoError)
goto err;
return (my_strdup(ts.getName(), MYF(0)));
}
diff --git a/sql/handler.cc b/sql/handler.cc
index 16381972200..782d367bf40 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -445,8 +445,6 @@ static my_bool init_handlerton(THD *unused1, st_plugin_int *plugin,
int ha_init()
{
int error= 0;
- handlerton **types;
- show_table_alias_st *table_alias;
total_ha= savepoint_alloc_size= 0;
if (ha_init_errors())
diff --git a/sql/handlerton-win.cc b/sql/handlerton-win.cc
new file mode 100644
index 00000000000..9ce4eab2444
--- /dev/null
+++ b/sql/handlerton-win.cc
@@ -0,0 +1,72 @@
+#include "mysql_priv.h"
+
+extern handlerton heap_hton;
+extern handlerton myisam_hton;
+extern handlerton myisammrg_hton;
+extern handlerton binlog_hton;
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
+extern handlerton innobase_hton;
+#endif
+#ifdef WITH_BERKELEY_STORAGE_ENGINE
+extern handlerton berkeley_hton;
+#endif
+#ifdef WITH_EXAMPLE_STORAGE_ENGINE
+extern handlerton example_hton;
+#endif
+#ifdef WITH_ARCHIVE_STORAGE_ENGINE
+extern handlerton archive_hton;
+#endif
+#ifdef WITH_CSV_STORAGE_ENGINE
+extern handlerton tina_hton;
+#endif
+#ifdef WITH_BLACKHOLE_STORAGE_ENGINE
+extern handlerton blackhole_hton;
+#endif
+#ifdef WITH_FEDERATED_STORAGE_ENGINE
+extern handlerton federated_hton;
+#endif
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
+extern handlerton ndbcluster_hton;
+#endif
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+extern handlerton partition_hton;
+#endif
+
+/*
+ This array is used for processing the compiled-in engines.
+*/
+handlerton *sys_table_types[]=
+{
+ &heap_hton,
+ &myisam_hton,
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
+ &innobase_hton,
+#endif
+#ifdef WITH_BERKELEY_STORAGE_ENGINE
+ &berkeley_hton,
+#endif
+#ifdef WITH_EXAMPLE_STORAGE_ENGINE
+ &example_hton,
+#endif
+#ifdef WITH_ARCHIVE_STORAGE_ENGINE
+ &archive_hton,
+#endif
+#ifdef WITH_CSV_STORAGE_ENGINE
+ &tina_hton,
+#endif
+#ifdef WITH_BLACKHOLE_STORAGE_ENGINE
+ &blackhole_hton,
+#endif
+#ifdef WITH_FEDERATED_STORAGE_ENGINE
+ &federated_hton,
+#endif
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
+ &ndbcluster_hton,
+#endif
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ &partition_hton,
+#endif
+ &myisammrg_hton,
+ &binlog_hton,
+ NULL
+};
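
The new handlerton-win.cc simply enumerates the engines compiled into a Windows build as a NULL-terminated array. A hedged sketch of how such a sentinel-terminated table is typically walked; fake_handlerton and engine_table are made-up stand-ins, not the server's types:

#include <cstdio>

// Simplified stand-in for the real handlerton struct.
struct fake_handlerton { const char *name; };

static fake_handlerton heap_example   = { "MEMORY" };
static fake_handlerton myisam_example = { "MyISAM" };

// NULL-terminated table of engines, in the spirit of sys_table_types[].
static fake_handlerton *engine_table[]= {
  &heap_example,
  &myisam_example,
  nullptr
};

int main()
{
  // Walk the table until the NULL sentinel, as init code usually does.
  for (fake_handlerton **types= engine_table; *types; types++)
    std::printf("engine: %s\n", (*types)->name);
  return 0;
}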
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index c4f7dec70d0..7ee206a4195 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -2028,7 +2028,6 @@ void Item_date_add_interval::fix_length_and_dec()
bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date)
{
- long period,sign;
INTERVAL interval;
if (args[0]->get_date(ltime, TIME_NO_ZERO_DATE) ||
diff --git a/sql/key.cc b/sql/key.cc
index 4001c6177a1..fd9e8a89b17 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -468,7 +468,6 @@ int key_rec_cmp(void *key, byte *first_rec, byte *second_rec)
do
{
Field *field= key_part->field;
- uint length;
if (key_part->null_bit)
{
diff --git a/sql/log.cc b/sql/log.cc
index 02bf5ec3015..4b5fcffffbf 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -717,8 +717,6 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
bool error= FALSE;
Log_event_handler **current_handler= slow_log_handler_list;
bool is_command= FALSE;
-
- char message_buff[MAX_LOG_BUFFER_SIZE];
char user_host_buff[MAX_USER_HOST_SIZE];
my_time_t current_time;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 99948bff8d2..584c3727dd6 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -8183,6 +8183,7 @@ ulong srv_thread_sleep_delay;
ulong srv_thread_concurrency;
ulong srv_commit_concurrency;
}
+
#endif
#ifndef WITH_NDBCLUSTER_STORAGE_ENGINE
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index e9ebd9cbccb..36de27ef3e3 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -2296,6 +2296,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
RANGE_OPT_PARAM *range_par= &prune_param.range_param;
prune_param.part_info= part_info;
+ prune_param.part_iter.has_null_value= FALSE;
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
range_par->mem_root= &alloc;
diff --git a/sql/sp.cc b/sql/sp.cc
index 022417d15d1..ac63847daa3 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -668,7 +668,6 @@ db_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics)
{
TABLE *table;
int ret;
- bool opened;
DBUG_ENTER("db_update_routine");
DBUG_PRINT("enter", ("type: %d name: %.*s",
type, name->m_name.length, name->m_name.str));
diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc
index fea6a67f32c..f5912caddaf 100644
--- a/sql/sp_cache.cc
+++ b/sql/sp_cache.cc
@@ -124,7 +124,6 @@ void sp_cache_clear(sp_cache **cp)
void sp_cache_insert(sp_cache **cp, sp_head *sp)
{
sp_cache *c;
- ulong v;
if (!(c= *cp))
{
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 6c2ca06164f..9f01f763f41 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1408,10 +1408,7 @@ void update_non_unique_table_error(TABLE_LIST *update,
TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name)
{
- char key[MAX_DBKEY_LENGTH];
- uint key_length;
TABLE_LIST table_list;
- TABLE *table;
table_list.db= (char*) db;
table_list.table_name= (char*) table_name;
@@ -1939,7 +1936,6 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
}
else
{
- TABLE_SHARE *share;
/* Free cache if too big */
while (open_cache.records > table_cache_size && unused_tables)
VOID(hash_delete(&open_cache,(byte*) unused_tables)); /* purecov: tested */
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 682c74dc31e..d8a886d2227 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -2868,9 +2868,6 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
/* Get the partitioning function value for the endpoint */
longlong part_func_value= part_val_int(part_info->part_expr);
- if (part_info->part_expr->null_value)
- DBUG_RETURN(0);
-
while (max_part_id > min_part_id)
{
loc_part_id= (max_part_id + min_part_id + 1) >> 1;
@@ -5745,6 +5742,19 @@ int get_part_iter_for_interval_via_mapping(partition_info *part_info,
else
DBUG_ASSERT(0);
+ if (field->real_maybe_null() && part_info->has_null_value)
+ {
+ if (*min_value)
+ {
+ if (*max_value && !(flags & (NO_MIN_RANGE | NO_MAX_RANGE)))
+ {
+ init_single_partition_iterator(part_info->has_null_part_id, part_iter);
+ return 1;
+ }
+ if (!(flags & NEAR_MIN))
+ part_iter->has_null_value= TRUE;
+ }
+ }
/* Find minimum */
if (flags & NO_MIN_RANGE)
part_iter->part_nums.start= 0;
@@ -5956,7 +5966,14 @@ uint32 get_next_partition_id_range(PARTITION_ITERATOR* part_iter)
uint32 get_next_partition_id_list(PARTITION_ITERATOR *part_iter)
{
if (part_iter->part_nums.start == part_iter->part_nums.end)
+ {
+ if (part_iter->has_null_value)
+ {
+ part_iter->has_null_value= FALSE;
+ return part_iter->part_info->has_null_part_id;
+ }
return NOT_A_PARTITION_ID;
+ }
else
return part_iter->part_info->list_array[part_iter->
part_nums.start++].partition_id;
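
The sql_partition.cc changes above teach the list-partition iterator to hand back the NULL partition exactly once, after the regular list entries are exhausted (paired with the has_null_value reset in opt_range.cc). A rough sketch of that consume-once pattern with invented types, not the real PARTITION_ITERATOR:

#include <cstddef>
#include <cstdint>
#include <vector>

static const uint32_t NOT_A_PARTITION_ID_SKETCH= UINT32_MAX;

// Toy iterator: yields the ids in `parts`, then, if has_null_value is set,
// yields null_part_id exactly once before signalling the end.
struct toy_part_iter
{
  std::vector<uint32_t> parts;
  std::size_t pos= 0;
  bool has_null_value= false;
  uint32_t null_part_id= 0;

  uint32_t next()
  {
    if (pos == parts.size())
    {
      if (has_null_value)
      {
        has_null_value= false;           // consume the NULL partition once
        return null_part_id;
      }
      return NOT_A_PARTITION_ID_SKETCH;  // end of iteration
    }
    return parts[pos++];
  }
};

Callers keep requesting ids until the sentinel comes back, so the NULL partition is pruned or scanned like any other partition.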
diff --git a/sql/sql_partition.h b/sql/sql_partition.h
index 4982b1fcf1f..c214be81ae4 100644
--- a/sql/sql_partition.h
+++ b/sql/sql_partition.h
@@ -124,7 +124,7 @@ typedef uint32 (*partition_iter_func)(st_partition_iter* part_iter);
typedef struct st_partition_iter
{
partition_iter_func get_next;
-
+ bool has_null_value;
struct st_part_num_range
{
uint32 start;
diff --git a/storage/csv/cmakelists.txt b/storage/csv/cmakelists.txt
new file mode 100644
index 00000000000..4e142646b2d
--- /dev/null
+++ b/storage/csv/cmakelists.txt
@@ -0,0 +1,5 @@
+SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
+SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
+
+INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include)
+ADD_LIBRARY(csv ha_tina.cc ha_tina.h)
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index 066a3011381..5f7c44f1074 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -53,6 +53,24 @@ TODO:
#include <mysql/plugin.h>
+/*
+ uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
+*/
+#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
+ + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
+#define TINA_CHECK_HEADER 254 // The number we use to determine corruption
+
+/* The file extension */
+#define CSV_EXT ".CSV" // The data file
+#define CSN_EXT ".CSN" // Files used during repair
+#define CSM_EXT ".CSM" // Meta file
+
+
+static TINA_SHARE *get_share(const char *table_name, TABLE *table);
+static int free_share(TINA_SHARE *share);
+static int read_meta_file(File meta_file, ha_rows *rows);
+static int write_meta_file(File meta_file, ha_rows rows, bool dirty);
+
/* Stuff for shares */
pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
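
META_BUFFER_SIZE above adds up to a fixed 35-byte meta-file image: a check-header byte, a version byte, an 8-byte row count, three reserved 8-byte fields and a trailing dirty flag. A small sketch of that assumed layout; the offsets are computed by hand for illustration, the server itself reads and writes the fields sequentially:

#include <cstddef>

// Assumed on-disk layout of the .CSM meta file, per the macro above.
enum csm_offsets : std::size_t
{
  CSM_OFF_CHECK_HEADER   = 0,    // 1 byte, expected to be TINA_CHECK_HEADER (254)
  CSM_OFF_VERSION        = 1,    // 1 byte, TINA_VERSION
  CSM_OFF_ROWS           = 2,    // 8 bytes, row count
  CSM_OFF_CHECK_POINT    = 10,   // 8 bytes, reserved
  CSM_OFF_AUTO_INCREMENT = 18,   // 8 bytes, reserved
  CSM_OFF_FORCED_FLUSHES = 26,   // 8 bytes, reserved
  CSM_OFF_DIRTY          = 34,   // 1 byte, crashed/dirty flag
  CSM_META_SIZE          = 35    // 3*1 + 4*8 bytes in total
};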
@@ -197,6 +215,7 @@ static int tina_done_func()
static TINA_SHARE *get_share(const char *table_name, TABLE *table)
{
TINA_SHARE *share;
+ char meta_file_name[FN_REFLEN];
char *tmp_name;
uint length;
@@ -212,9 +231,8 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
*/
if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables,
(byte*) table_name,
- length)))
+ length)))
{
- char data_file_name[FN_REFLEN];
if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
&share, sizeof(*share),
&tmp_name, length+1,
@@ -228,15 +246,39 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table)
share->is_log_table= FALSE;
share->table_name_length= length;
share->table_name= tmp_name;
+ share->crashed= FALSE;
+ share->rows_recorded= 0;
strmov(share->table_name, table_name);
- fn_format(data_file_name, table_name, "", ".CSV",
+ fn_format(share->data_file_name, table_name, "", CSV_EXT,
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ fn_format(meta_file_name, table_name, "", CSM_EXT,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if (my_hash_insert(&tina_open_tables, (byte*) share))
goto error;
thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
- if ((share->data_file= my_open(data_file_name, O_RDWR|O_APPEND,
+ /*
+ Open or create the meta file. In the latter case, we'll get
+ an error during read_meta_file and mark the table as crashed.
+ Usually this will result in auto-repair, and we will get a good
+ meta-file in the end.
+ */
+ if ((share->meta_file= my_open(meta_file_name,
+ O_RDWR|O_CREAT, MYF(0))) == -1)
+ share->crashed= TRUE;
+
+ /*
+ After we read, we set the file to dirty. When we close, we will do the
+ opposite. If the meta file cannot be opened, we assume it is crashed and
+ mark it as such.
+ */
+ if (read_meta_file(share->meta_file, &share->rows_recorded))
+ share->crashed= TRUE;
+ else
+ (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+
+ if ((share->data_file= my_open(share->data_file_name, O_RDWR|O_APPEND,
MYF(0))) == -1)
goto error2;
@@ -273,6 +315,128 @@ error:
/*
+ Read CSV meta-file
+
+ SYNOPSIS
+ read_meta_file()
+ meta_file The meta-file filedes
+ rows Pointer to the variable where the row count is stored.
+ It is read from the meta-file.
+
+ DESCRIPTION
+
+ Read the meta-file info. For now we are only interested in the
+ row count, crashed bit and magic number.
+
+ RETURN
+ 0 - OK
+ non-zero - error occurred
+*/
+
+static int read_meta_file(File meta_file, ha_rows *rows)
+{
+ uchar meta_buffer[META_BUFFER_SIZE];
+ uchar *ptr= meta_buffer;
+
+ DBUG_ENTER("ha_tina::read_meta_file");
+
+ VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
+ if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0)
+ != META_BUFFER_SIZE)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
+ /*
+ Parse out the meta data, we ignore version at the moment
+ */
+
+ ptr+= sizeof(uchar)*2; // Move past header
+ *rows= (ha_rows)uint8korr(ptr);
+ ptr+= sizeof(ulonglong); // Move past rows
+ /*
+ Move past check_point, auto_increment and forced_flushes fields.
+ They are present in the format, but we do not use them yet.
+ */
+ ptr+= 3*sizeof(ulonglong);
+
+ /* check crashed bit and magic number */
+ if ((meta_buffer[0] != (uchar)TINA_CHECK_HEADER) ||
+ ((bool)(*ptr)== TRUE))
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
+ my_sync(meta_file, MYF(MY_WME));
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Write CSV meta-file
+
+ SYNOPSIS
+ write_meta_file()
+ meta_file The meta-file filedes
+ rows The number of rows we have in the data file.
+ dirty A flag that marks whether the table is corrupt
+
+ DESCRIPTION
+
+ Write the meta-info to the file. Only the row count, crashed bit and
+ magic number matter now.
+
+ RETURN
+ 0 - OK
+ non-zero - error occurred
+*/
+
+static int write_meta_file(File meta_file, ha_rows rows, bool dirty)
+{
+ uchar meta_buffer[META_BUFFER_SIZE];
+ uchar *ptr= meta_buffer;
+
+ DBUG_ENTER("ha_tina::write_meta_file");
+
+ *ptr= (uchar)TINA_CHECK_HEADER;
+ ptr+= sizeof(uchar);
+ *ptr= (uchar)TINA_VERSION;
+ ptr+= sizeof(uchar);
+ int8store(ptr, (ulonglong)rows);
+ ptr+= sizeof(ulonglong);
+ memset(ptr, 0, 3*sizeof(ulonglong));
+ /*
+ Skip over checkpoint, autoincrement and forced_flushes fields.
+ We'll need them later.
+ */
+ ptr+= 3*sizeof(ulonglong);
+ *ptr= (uchar)dirty;
+
+ VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
+ if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0)
+ != META_BUFFER_SIZE)
+ DBUG_RETURN(-1);
+
+ my_sync(meta_file, MYF(MY_WME));
+
+ DBUG_RETURN(0);
+}
+
+bool ha_tina::check_and_repair(THD *thd)
+{
+ HA_CHECK_OPT check_opt;
+ DBUG_ENTER("ha_tina::check_and_repair");
+
+ check_opt.init();
+
+ DBUG_RETURN(repair(thd, &check_opt));
+}
+
+
+bool ha_tina::is_crashed() const
+{
+ DBUG_ENTER("ha_tina::is_crashed");
+ DBUG_RETURN(share->crashed);
+}
+
+/*
Free lock controls.
*/
static int free_share(TINA_SHARE *share)
@@ -281,7 +445,11 @@ static int free_share(TINA_SHARE *share)
pthread_mutex_lock(&tina_mutex);
int result_code= 0;
if (!--share->use_count){
- /* Drop the mapped file */
+ /* Write the meta file. Mark it as crashed if needed. */
+ (void)write_meta_file(share->meta_file, share->rows_recorded,
+ share->crashed ? TRUE :FALSE);
+ if (my_close(share->meta_file, MYF(0)))
+ result_code= 1;
if (share->mapped_file)
my_munmap(share->mapped_file, share->file_stat.st_size);
result_code= my_close(share->data_file,MYF(0));
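
Taken together, the get_share() and free_share() hunks implement a small dirty-flag protocol: the meta file is marked dirty right after it is read on open and rewritten as clean (or crashed) when the last user closes the share, so an unclean shutdown leaves the dirty bit set and triggers auto-repair on the next open. A compressed, hypothetical sketch of that lifecycle with an in-memory stand-in for the file; none of these names are the handler's own:

#include <cstdio>

// In-memory stand-in for the .CSM image: only the fields this sketch uses.
struct meta_state { unsigned long long rows; bool dirty; };

static meta_state on_disk= { 0, false };            // pretend this is the file

static bool read_meta(meta_state *m)        { *m= on_disk; return true; }
static bool write_meta(const meta_state *m) { on_disk= *m; return true; }

// On open: a dirty image from a previous unclean close means "crashed";
// otherwise mark the image dirty while the table is in use.
static bool open_share(meta_state *m, bool *crashed)
{
  if (!read_meta(m) || m->dirty)
    *crashed= true;
  else
  {
    m->dirty= true;
    write_meta(m);
  }
  return !*crashed;
}

// On the last close: write the image back, clean unless the table crashed.
static void close_share(meta_state *m, bool crashed)
{
  m->dirty= crashed;
  write_meta(m);
}

int main()
{
  meta_state m;
  bool crashed= false;
  open_share(&m, &crashed);
  close_share(&m, crashed);
  std::printf("dirty after clean close: %d\n", (int) on_disk.dirty);
  return 0;
}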
@@ -410,7 +578,7 @@ int ha_tina::encode_quote(byte *buf)
/*
chain_append() adds delete positions to the chain that we use to keep
- track of space. Then the chain will be used to cleanup "holes", occured
+ track of space. Then the chain will be used to cleanup "holes", occurred
due to deletes and updates.
*/
int ha_tina::chain_append()
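
The comment above describes the delete chain as a list of byte ranges ("holes") that later get squeezed out of the data file. A toy sketch of that bookkeeping under the assumption of sorted, non-overlapping ranges; the names are invented, not ha_tina's chain structures:

#include <cstddef>
#include <vector>

// Toy "hole chain": byte ranges removed by DELETE/UPDATE. When the file is
// rewritten, everything outside these ranges is kept and the holes vanish.
struct hole { std::size_t begin, end; };

// For a byte offset in the old file, return its offset after the holes have
// been squeezed out (offsets inside a hole map to the hole's new start).
static std::size_t compacted_offset(std::size_t old_pos,
                                    const std::vector<hole> &chain)
{
  std::size_t shift= 0;
  for (const hole &h : chain)
  {
    if (h.end <= old_pos)
      shift+= h.end - h.begin;            // whole hole lies before old_pos
    else if (h.begin <= old_pos)
      shift+= old_pos - h.begin;          // old_pos falls inside this hole
  }
  return old_pos - shift;
}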
@@ -472,7 +640,10 @@ int ha_tina::find_current_row(byte *buf)
for (Field **field=table->field ; *field ; field++)
{
buffer.length(0);
- mapped_ptr++; // Increment past the first quote
+ if (*mapped_ptr == '"')
+ mapped_ptr++; // Increment past the first quote
+ else
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
for(;mapped_ptr != end_ptr; mapped_ptr++)
{
// Need to convert line feeds!
@@ -498,8 +669,16 @@ int ha_tina::find_current_row(byte *buf)
buffer.append(*mapped_ptr);
}
}
- else
+ else // ordinary symbol
+ {
+ /*
+ We are at the final symbol and no closing quote was found =>
+ we are working with a damaged file.
+ */
+ if (mapped_ptr == end_ptr -1)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
buffer.append(*mapped_ptr);
+ }
}
(*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
}
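
The find_current_row() changes above treat a field without an opening quote, or one whose closing quote never arrives before the end of the mapped buffer, as corruption (HA_ERR_CRASHED_ON_USAGE). A tiny standalone parser sketch of just that rule; the real row format also handles escaped quotes, line feeds and field separators:

#include <string>

// Returns false ("row is damaged") if the field does not start with a quote
// or the closing quote is missing before the end of the buffer.
static bool parse_quoted_field(const char *&p, const char *end, std::string &out)
{
  if (p == end || *p != '"')
    return false;                     // no opening quote => damaged row
  for (++p; p != end; ++p)
  {
    if (*p == '"')
    {
      ++p;                            // step past the closing quote
      return true;
    }
    if (p == end - 1)
      return false;                   // last byte and still no closing quote
    out+= *p;
  }
  return false;                       // opening quote was the last byte
}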
@@ -515,7 +694,8 @@ int ha_tina::find_current_row(byte *buf)
extensions exist for this handler.
*/
static const char *ha_tina_exts[] = {
- ".CSV",
+ CSV_EXT,
+ CSM_EXT,
NullS
};
@@ -638,12 +818,18 @@ bool ha_tina::check_if_locking_is_allowed(uint sql_command,
this will not be called for every request. Any sort of positions
that need to be reset should be kept in the ::extra() call.
*/
-int ha_tina::open(const char *name, int mode, uint test_if_locked)
+int ha_tina::open(const char *name, int mode, uint open_options)
{
DBUG_ENTER("ha_tina::open");
if (!(share= get_share(name, table)))
- DBUG_RETURN(1);
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+
+ if (share->crashed && !(open_options & HA_OPEN_FOR_REPAIR))
+ {
+ free_share(share);
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ }
/*
Init locking. Pass handler object to the locking routines,
@@ -681,6 +867,9 @@ int ha_tina::write_row(byte * buf)
int size;
DBUG_ENTER("ha_tina::write_row");
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
ha_statistic_increment(&SSV::ha_write_count);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
@@ -704,13 +893,13 @@ int ha_tina::write_row(byte * buf)
/* update local copy of the max position to see our own changes */
local_saved_data_file_length= share->file_stat.st_size;
+ /* update shared info */
+ pthread_mutex_lock(&share->mutex);
+ share->rows_recorded++;
/* update status for the log tables */
if (share->is_log_table)
- {
- pthread_mutex_lock(&share->mutex);
update_status();
- pthread_mutex_unlock(&share->mutex);
- }
+ pthread_mutex_unlock(&share->mutex);
records++;
DBUG_RETURN(0);
@@ -814,6 +1003,9 @@ int ha_tina::rnd_init(bool scan)
{
DBUG_ENTER("ha_tina::rnd_init");
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
current_position= next_position= 0;
records= 0;
records_is_known= 0;
@@ -843,15 +1035,19 @@ int ha_tina::rnd_init(bool scan)
*/
int ha_tina::rnd_next(byte *buf)
{
+ int rc;
DBUG_ENTER("ha_tina::rnd_next");
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
current_position= next_position;
if (!share->mapped_file)
DBUG_RETURN(HA_ERR_END_OF_FILE);
- if (HA_ERR_END_OF_FILE == find_current_row(buf) )
- DBUG_RETURN(HA_ERR_END_OF_FILE);
+ if ((rc= find_current_row(buf)))
+ DBUG_RETURN(rc);
records++;
DBUG_RETURN(0);
@@ -975,7 +1171,105 @@ int ha_tina::rnd_end()
}
-/*
+/*
+ Repair the CSV table in case it is crashed.
+
+ SYNOPSIS
+ repair()
+ thd The thread, performing repair
+ check_opt The options for repair. We do not use it currently.
+
+ DESCRIPTION
+ If the data file is empty, reset the recorded row count and complete recovery.
+ Otherwise, scan the table looking for bad rows. If none are found,
+ we mark the file as good and return. If a bad row is encountered,
+ we truncate the datafile up to the last good row.
+
+ TODO: Make repair more clever - it should try to recover subsequent
+ rows (after the first bad one) as well.
+*/
+
+int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ char repaired_fname[FN_REFLEN];
+ byte *buf;
+ File repair_file;
+ int rc;
+ ha_rows rows_repaired= 0;
+ DBUG_ENTER("ha_tina::repair");
+
+ /* empty file */
+ if (!share->mapped_file)
+ {
+ share->rows_recorded= 0;
+ goto end;
+ }
+
+ if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+
+ /*
+ Local_saved_data_file_length is initialized during the lock phase.
+ Sometimes this does not get executed before ::repair (e.g. for
+ the log tables). We set it manually here.
+ */
+ local_saved_data_file_length= share->file_stat.st_size;
+ /* set current position to the beginning of the file */
+ current_position= next_position= 0;
+
+ /* Read the file row-by-row. If everything is ok, repair is not needed. */
+ while (!(rc= find_current_row(buf)))
+ {
+ rows_repaired++;
+ current_position= next_position;
+ }
+
+ my_free((char*)buf, MYF(0));
+
+ /* The file is ok */
+ if (rc == HA_ERR_END_OF_FILE)
+ {
+ /*
+ If rows_recorded != rows_repaired, we should update the
+ rows_recorded value to the actual number of rows.
+ */
+ share->rows_recorded= rows_repaired;
+ goto end;
+ }
+
+ /*
+ Otherwise we've encountered a bad row => repair is needed.
+ Let us create a temporary file.
+ */
+ if ((repair_file= my_create(fn_format(repaired_fname, share->table_name,
+ "", CSN_EXT,
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME),
+ 0, O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
+
+ if (my_write(repair_file, (byte*)share->mapped_file, current_position,
+ MYF(MY_NABP)))
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ my_close(repair_file, MYF(0));
+ /* we just truncated the file up to the first bad row. update rows count. */
+ share->rows_recorded= rows_repaired;
+
+ if (my_munmap(share->mapped_file, share->file_stat.st_size))
+ DBUG_RETURN(-1);
+
+ my_rename(repaired_fname, share->data_file_name, MYF(0));
+
+ /* We set it to null so that get_mmap() won't try to unmap it */
+ share->mapped_file= NULL;
+ if (get_mmap(share, 0) > 0)
+ DBUG_RETURN(-1);
+
+end:
+ share->crashed= FALSE;
+ DBUG_RETURN(HA_ADMIN_OK);
+}
+
+/*
DELETE without WHERE calls this
*/
@@ -1021,16 +1315,64 @@ int ha_tina::create(const char *name, TABLE *table_arg,
File create_file;
DBUG_ENTER("ha_tina::create");
- if ((create_file= my_create(fn_format(name_buff, name, "", ".CSV",
+ if ((create_file= my_create(fn_format(name_buff, name, "", CSM_EXT,
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME), 0,
+ O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ DBUG_RETURN(-1);
+
+ write_meta_file(create_file, 0, FALSE);
+ my_close(create_file, MYF(0));
+
+ if ((create_file= my_create(fn_format(name_buff, name, "", CSV_EXT,
MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
DBUG_RETURN(-1);
- my_close(create_file,MYF(0));
+ my_close(create_file, MYF(0));
DBUG_RETURN(0);
}
+int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ int rc= 0;
+ byte *buf;
+ const char *old_proc_info;
+ ha_rows count= share->rows_recorded;
+ DBUG_ENTER("ha_tina::check");
+
+ old_proc_info= thd_proc_info(thd, "Checking table");
+ if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+
+ /*
+ Local_saved_data_file_length is initialized during the lock phase.
+ Check does not use store_lock in certain cases. So, we set it
+ manually here.
+ */
+ local_saved_data_file_length= share->file_stat.st_size;
+ /* set current position to the beginning of the file */
+ current_position= next_position= 0;
+ /* Read the file row-by-row. If everything is ok, repair is not needed. */
+ while (!(rc= find_current_row(buf)))
+ {
+ count--;
+ current_position= next_position;
+ }
+
+ my_free((char*)buf, MYF(0));
+ thd_proc_info(thd, old_proc_info);
+
+ if ((rc != HA_ERR_END_OF_FILE) || count)
+ {
+ share->crashed= TRUE;
+ DBUG_RETURN(HA_ADMIN_CORRUPT);
+ }
+ else
+ DBUG_RETURN(HA_ADMIN_OK);
+}
+
+
bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes)
{
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index 572d05cb779..a60acc62a1e 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -19,9 +19,16 @@
#include <my_dir.h>
#define DEFAULT_CHAIN_LENGTH 512
+/*
+ Version for file format.
+ 1 - Initial Version. That is, the version when the metafile was introduced.
+*/
+
+#define TINA_VERSION 1
typedef struct st_tina_share {
char *table_name;
+ char data_file_name[FN_REFLEN];
byte *mapped_file; /* mapped region of file */
uint table_name_length, use_count;
/*
@@ -39,6 +46,9 @@ typedef struct st_tina_share {
off_t saved_data_file_length;
pthread_mutex_t mutex;
THR_LOCK lock;
+ File meta_file; /* Meta file we use */
+ bool crashed; /* Meta file is crashed */
+ ha_rows rows_recorded; /* Number of rows in the table */
} TINA_SHARE;
typedef struct tina_set {
@@ -108,7 +118,7 @@ public:
ulong type, TABLE *table,
uint count,
bool called_by_logger_thread);
- int open(const char *name, int mode, uint test_if_locked);
+ int open(const char *name, int mode, uint open_options);
int close(void);
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
@@ -116,7 +126,13 @@ public:
int rnd_init(bool scan=1);
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
+ bool check_and_repair(THD *thd);
+ int check(THD* thd, HA_CHECK_OPT* check_opt);
+ bool is_crashed() const;
int rnd_end();
+ int repair(THD* thd, HA_CHECK_OPT* check_opt);
+ /* This is required for the SQL layer to know that we support auto-repair */
+ bool auto_repair() const { return 1; }
void position(const byte *record);
void info(uint);
int extra(enum ha_extra_function operation);
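
The new methods in ha_tina.h hook the engine into the server's auto-repair machinery: auto_repair() advertises the capability, is_crashed() exposes the share's crashed flag, and check_and_repair() runs repair() with default options. A rough, hypothetical sketch of how an open path could drive such an interface; this is an illustration, not the actual SQL-layer code:

// Hypothetical handler mirroring the methods added above.
struct toy_handler
{
  bool crashed= false;

  bool auto_repair() const { return true; }    // "I support auto-repair"
  bool is_crashed() const  { return crashed; }
  int  repair()            { crashed= false; return 0; }
  bool check_and_repair()  { return repair() != 0; }  // true means failure
};

// A crashed table that supports auto-repair is repaired instead of making
// the open fail with a "crashed" error.
static int open_with_auto_repair(toy_handler &h)
{
  if (h.is_crashed())
  {
    if (!h.auto_repair())
      return 1;                                // report crashed to the caller
    if (h.check_and_repair())
      return 1;                                // repair itself failed
  }
  return 0;                                    // table is usable
}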
diff --git a/storage/ndb/test/ndbapi/test_event.cpp b/storage/ndb/test/ndbapi/test_event.cpp
index 6e7da27cab2..2f2a28fbff2 100644
--- a/storage/ndb/test/ndbapi/test_event.cpp
+++ b/storage/ndb/test/ndbapi/test_event.cpp
@@ -1537,7 +1537,7 @@ static int runCreateDropNR(NDBT_Context* ctx, NDBT_Step* step)
break;
}
NdbDictionary::Table tmp(*pTab);
- tmp.setNodeGroupIds(0, 0);
+ //tmp.setNodeGroupIds(0, 0);
if (ndb->getDictionary()->createTable(tmp) != 0){
g_err << "createTable failed: "
<< ndb->getDictionary()->getNdbError() << endl;
diff --git a/win/Makefile.am b/win/Makefile.am
index 8442214bb97..2f4aa626a93 100644
--- a/win/Makefile.am
+++ b/win/Makefile.am
@@ -15,7 +15,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
## Process this file with automake to create Makefile.in
-EXTRA_DIST = build-vs71.bat build-vs8.bat config-handlerton.js configure.js config-version.js README
+EXTRA_DIST = build-vs71.bat build-vs8.bat configure.js README
# Don't update the files from bitkeeper
%::SCCS/s.%