author     unknown <monty@hundin.mysql.fi>  2001-11-22 17:55:18 +0200
committer  unknown <monty@hundin.mysql.fi>  2001-11-22 17:55:18 +0200
commit     38357b30e6ccf4278807cbd8b91ca14efe3fff15 (patch)
tree       5c88c46555c8f2f0d49c779e6347d2d6139a84e0
parent     e673b6dcf8b55ec4457ab554a1c09ccf3c079463 (diff)
download   mariadb-git-38357b30e6ccf4278807cbd8b91ca14efe3fff15.tar.gz
Fixed fulltext after merge from 3.23.45

First (incomplete) version of transaction and as3ap tests.

BitKeeper/etc/ignore:
  Added sql-bench/test-transactions to the ignore list
mysql-test/r/fulltext.result:
  Update fulltext results after merge
sql-bench/Makefile.am:
  Added transaction test
sql-bench/run-all-tests.sh:
  Added transaction test
sql-bench/server-cfg.sh:
  Added transaction test
sql-bench/test-ATIS.sh:
  Cleanup
sql/item_func.cc:
  Fix bad merge
sql/mysqld.cc:
  Cleanup
sql/sql_base.cc:
  Fix bad merge
sql/sql_delete.cc:
  Cleanup
sql/sql_parse.cc:
  Fix bad merge
sql/sql_select.cc:
  Fix bad merge
sql/sql_union.cc:
  Fix bad merge
tools/mysqlmanager.c:
  C

-rw-r--r--  .bzrignore                      |   1
-rw-r--r--  mysql-test/r/fulltext.result    |   7
-rw-r--r--  sql-bench/Makefile.am           |   4
-rw-r--r--  sql-bench/as3ap.sh              | 636
-rw-r--r--  sql-bench/run-all-tests.sh      |   4
-rw-r--r--  sql-bench/server-cfg.sh         |  30
-rw-r--r--  sql-bench/test-ATIS.sh          |   3
-rw-r--r--  sql-bench/test-transactions.sh  | 268
-rw-r--r--  sql/item_func.cc                |   5
-rw-r--r--  sql/mysqld.cc                   |  88
-rw-r--r--  sql/sql_base.cc                 |   6
-rw-r--r--  sql/sql_delete.cc               |  14
-rw-r--r--  sql/sql_parse.cc                |   2
-rw-r--r--  sql/sql_select.cc               |  38
-rw-r--r--  sql/sql_union.cc                |   8
-rw-r--r--  tools/mysqlmanager.c            |   2
16 files changed, 1030 insertions, 86 deletions
diff --git a/.bzrignore b/.bzrignore
index 936f4c48bb0..b741024bb95 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -445,3 +445,4 @@ vio/test-ssl
vio/test-sslclient
vio/test-sslserver
vio/viotest-ssl
+sql-bench/test-transactions
diff --git a/mysql-test/r/fulltext.result b/mysql-test/r/fulltext.result
index dbc0ac09f31..17885290d46 100644
--- a/mysql-test/r/fulltext.result
+++ b/mysql-test/r/fulltext.result
@@ -56,6 +56,13 @@ Only MyISAM tables support collections 2
Function MATCH ... AGAINST() is used to do a search 0
Full-text search in MySQL implements vector space model 0
delete from t1 where a like "MySQL%";
+update t1 set a='some test foobar' where MATCH a,b AGAINST ('model');
+delete from t1 where MATCH(a,b) AGAINST ("indexes");
+select * from t1;
+a b
+Only MyISAM tables support collections
+Function MATCH ... AGAINST() is used to do a search
+some test foobar implements vector space model
drop table t1;
CREATE TABLE t1 (
id int(11),
diff --git a/sql-bench/Makefile.am b/sql-bench/Makefile.am
index e9c3e07beef..4bfa9868428 100644
--- a/sql-bench/Makefile.am
+++ b/sql-bench/Makefile.am
@@ -21,13 +21,15 @@ benchdir_root= $(prefix)
benchdir = $(benchdir_root)/sql-bench
bench_SCRIPTS = test-ATIS test-connect test-create test-insert \
test-big-tables test-select test-wisconsin \
- test-alter-table graph-compare-results \
+ test-alter-table test-transactions \
+ graph-compare-results \
bench-init.pl compare-results run-all-tests \
server-cfg crash-me copy-db bench-count-distinct
CLEANFILES = $(bench_SCRIPTS)
EXTRA_SCRIPTS = test-ATIS.sh test-connect.sh test-create.sh \
test-insert.sh test-big-tables.sh test-select.sh \
test-alter-table.sh test-wisconsin.sh \
+ test-transactions.sh \
bench-init.pl.sh compare-results.sh server-cfg.sh \
run-all-tests.sh crash-me.sh copy-db.sh \
bench-count-distinct.sh graph-compare-results.sh
diff --git a/sql-bench/as3ap.sh b/sql-bench/as3ap.sh
new file mode 100644
index 00000000000..ed796249ebb
--- /dev/null
+++ b/sql-bench/as3ap.sh
@@ -0,0 +1,636 @@
+#!@PERL@
+# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Library General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Library General Public License for more details.
+#
+# You should have received a copy of the GNU Library General Public
+# License along with this library; if not, write to the Free
+# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA 02111-1307, USA
+#
+# AS3AP single-user benchmark.
+#
+
+##################### Standard benchmark inits ##############################
+
+use DBI;
+use Benchmark;
+
+chomp($pwd = `pwd`); $pwd = "." if ($pwd eq '');
+require "$pwd/bench-init.pl" || die "Can't read Configuration file: $!\n";
+
+$opt_loop_count=1;
+
+#Create tables
+
+$dbh = $server->connect();
+
+#Create Table
+$sth = $dbh->do("drop table uniques");
+$sth = $dbh->do("drop table updates");
+$sth = $dbh->do("drop table hundred");
+$sth = $dbh->do("drop table tenpct");
+$sth = $dbh->do("drop table tiny");
+
+#Temporary table
+$sth = $dbh->do("drop table saveupdates");
+
+@fields=("col_key int not null",
+ "col_int int not null",
+ "col_signed int not null",
+ "col_float float not null",
+ "col_double float not null",
+ "col_decim numeric(18,2) not null",
+ "col_date char(20) not null",
+ "col_code char(10) not null",
+ "col_name char(20) not null",
+ "col_address varchar(80) not null");
+
+do_many($dbh,$server->create("uniques",\@fields,[]));
+do_many($dbh,$server->create("updates",\@fields,[]));
+do_many($dbh,$server->create("hundred",\@fields,[]));
+do_many($dbh,$server->create("tenpct",\@fields,[]));
+do_many($dbh,$server->create("tiny",["col_key int not null"],[]));
+
+print "Start AS3AP benchmark\n\n";
+
+$start_time=new Benchmark;
+
+print "Load DATA\n";
+#Load DATA
+
+@table_names=("uniques","updates","hundred","tenpct","tiny");
+
+$loop_time=new Benchmark;
+
+if ($opt_fast && $server->{'limits'}->{'load_data_infile'})
+{
+ for ($ti = 0; $ti <= $#table_names; $ti++)
+ {
+ my $table_name = $table_names[$ti];
+ my $file = "$pwd/Data/AS3AP/${table_name}\.new";
+ print "$table_name - $file\n" if ($opt_debug);
+ $row_count += $server->insert_file($table_name,$file,$dbh);
+ }
+}
+else
+{
+ for ($ti = 0; $ti <= $#table_names; $ti++)
+ {
+ my $table_name = $table_names[$ti];
+ print "$table_name - $file\n" if ($opt_debug);
+ my $insert_start = "insert into $table_name values (";
+ open(DATA, "$pwd/Data/AS3AP/${table_name}\.new") || die "Can't open text file: $pwd/Data/AS3AP/${table_name}\.new\n";
+ while(<DATA>)
+ {
+ chomp;
+ next unless ( $_ =~ /\w/ ); # skip blank lines
+ $command = $insert_start."$_".")";
+ $command =~ $server->fix_to_insert($command);
+ print "$command\n" if ($opt_debug);
+ $sth = $dbh->do($command) or die "Got error: $DBI::errstr when executing '$command'\n";
+ $row_count++;
+ }
+ close(DATA);
+ }
+}
+
+$end_time=new Benchmark;
+print "Time for Load Data - " . "($row_count): " .
+timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+
+
+print "Create Index\n";
+
+test_command("create_idx_uniques_key_bt",
+ "time for create_idx_uniques_key_bt",
+ "create unique index uniques_key_bt on uniques (col_key)",$dbh,$opt_loop_count);
+
+test_command("create_idx_updates_key_bt",
+ "time for create_idx_updates_key_bt",
+ "create unique index updates_key_bt on updates (col_key)",$dbh,$opt_loop_count);
+
+test_command("create_idx_hundred_key_bt",
+ "time for create_idx_hundred_key_bt",
+ "create unique index hundred_key_bt on hundred (col_key)",
+ $dbh,$opt_loop_count);
+
+test_command("create_idx_tenpct_key_bt",
+ "time for create_idx_tenpct_key_bt",
+ "create unique index tenpct_key_bt on tenpct (col_key)",$dbh,$opt_loop_count);
+
+test_command("create_idx_tenpct_key_code_bt",
+ "time for create_idx_tenpct_key_code_bt",
+ "create index tenpct_key_code_bt on tenpct (col_key,col_code)",
+ $dbh,$opt_loop_count);
+
+test_command("create_idx_tiny_key_bt",
+ "time for create_idx_tiny_key_bt",
+ "create index tiny_key_bt on tiny (col_key)",$dbh,$opt_loop_count);
+
+test_command("create_idx_tenpct_int_bt",
+ "time for create_idx_tenpct_int_bt",
+ "create index tenpct_int_bt on tenpct (col_int)",$dbh,$opt_loop_count);
+
+test_command("create_idx_tenpct_signed_bt",
+ "time for create_idx_tenpct_signed_bt",
+ "create index tenpct_signed_bt on tenpct (col_signed)",$dbh,$opt_loop_count);
+
+test_command("create_idx_uniques_code_h",
+ "time for create_idx_uniques_code_h",
+ "create index uniques_code_h on uniques (col_code)",$dbh,$opt_loop_count);
+
+test_command("create_idx_tenpct_double_bt",
+ "time for create_idx_tenpct_double_bt",
+ "create index tenpct_double_bt on tenpct (col_double)",$dbh,$opt_loop_count);
+
+
+test_command("create_idx_updates_decim_bt",
+ "time for create_idx_updates_decim_bt",
+ "create index updates_decim_bt on updates (col_decim)",$dbh,$opt_loop_count);
+
+test_command("create_idx_tenpct_float_bt",
+ "time for create_idx_tenpct_float_bt",
+ "create index tenpct_float_bt on tenpct (col_float)",$dbh,$opt_loop_count);
+
+test_command("create_idx_updates_int_bt",
+ "time for create_idx_updates_int_bt",
+ "create index updates_int_bt on updates (col_int)",$dbh,$opt_loop_count);
+
+test_command("create_idx_tenpct_decim_bt",
+ "time for create_idx_tenpct_decim_bt",
+ "create index tenpct_decim_bt on tenpct (col_decim)",$dbh,$opt_loop_count);
+
+test_command("create_idx_hundred_code_h",
+ "time for create_idx_hundred_code_h",
+ "create index hundred_code_h on hundred (col_code)",$dbh,$opt_loop_count);
+
+test_command("create_idx_tenpct_name_h",
+ "time for create_idx_tenpct_name_h",
+ "create index tenpct_name_h on tenpct (col_name)",$dbh,$opt_loop_count);
+
+test_command("create_idx_updates_code_h",
+ "time for create_idx_updates_code_h",
+ "create index updates_code_h on updates (col_code)",$dbh,$opt_loop_count);
+
+test_command("create_idx_tenpct_code_h",
+ "time for create_idx_tenpct_code_h",
+ "create index tenpct_code_h on tenpct (col_code)",$dbh,$opt_loop_count);
+
+test_command("create_idx_updates_double_bt",
+ "time for create_idx_updates_double_bt",
+ "create index updates_double_bt on updates (col_double)",$dbh,$opt_loop_count);
+
+test_command("create_idx_hundred_foreign",
+ "time for create_idx_hundred_foreign",
+ "alter table hundred add constraint fk_hundred_updates foreign key (col_signed)
+ references updates (col_key)",$dbh,$opt_loop_count);
+
+test_query("sel_1_cl",
+ "Time to sel_1_cl",
+ "select col_key, col_int, col_signed, col_code, col_double, col_name
+ from updates where col_key = 1000",$dbh,$opt_loop_count);
+
+test_query("join_3_cl",
+ "Time to join_3_cl",
+ "select uniques.col_signed, uniques.col_date,
+ hundred.col_signed, hundred.col_date,
+ tenpct.col_signed, tenpct.col_date
+ from uniques, hundred, tenpct
+ where uniques.col_key = hundred.col_key
+ and uniques.col_key = tenpct.col_key
+ and uniques.col_key = 1000",$dbh,$opt_loop_count);
+
+test_query("sel_100_ncl",
+ "Time to sel_100_ncl",
+ "select col_key, col_int, col_signed, col_code,col_double, col_name
+ from updates where col_int <= 100",$dbh,$opt_loop_count);
+
+test_query("table_scan",
+ "Time to table_scan",
+ "select * from uniques where col_int = 1",$dbh,$opt_loop_count);
+
+test_query("agg_func",
+ "Time for agg_func",
+ "select min(col_key) from hundred group by col_name",$dbh,$opt_loop_count);
+
+test_query("agg_scal",
+ "Time for agg_scal",
+ "select min(col_key) from uniques",$dbh,$opt_loop_count);
+
+test_query("sel_100_cl",
+ "Time for sel_100_cl",
+ "select col_key, col_int, col_signed, col_code,
+ col_double, col_name
+ from updates where col_key <= 100",$dbh,$opt_loop_count);
+
+test_query("join_3_ncl",
+ "Time for join_3_ncl",
+ "select uniques.col_signed, uniques.col_date,
+ hundred.col_signed, hundred.col_date,
+ tenpct.col_signed, tenpct.col_date
+ from uniques, hundred, tenpct
+ where uniques.col_code = hundred.col_code
+ and uniques.col_code = tenpct.col_code
+ and uniques.col_code = 'BENCHMARKS'",$dbh,$opt_loop_count);
+
+test_query("sel_10pct_ncl",
+ "Time for sel_10pct_ncl",
+ "select col_key, col_int, col_signed, col_code,
+ col_double, col_name
+ from tenpct
+ where col_name = 'THE+ASAP+BENCHMARKS+'",$dbh,$opt_loop_count);
+
+if ($limits->{'subqueries'}){
+ test_query("agg_simple_report",
+ "Time for agg_simple_report",
+ "select avg(updates.col_decim)
+ from updates
+ where updates.col_key in
+ (select updates.col_key
+ from updates, hundred
+ where hundred.col_key = updates.col_key
+ and updates.col_decim > 980000000)",$dbh,$opt_loop_count);
+}else{
+ print "agg_simple_report - Failed\n\n";
+}
+
+test_query("agg_info_retrieval",
+ "Time for agg_info_retrieval",
+ "select count(col_key)
+ from tenpct
+ where col_name = 'THE+ASAP+BENCHMARKS'
+ and col_int <= 100000000
+ and col_signed between 1 and 99999999
+ and not (col_float between -450000000 and 450000000)
+ and col_double > 600000000
+ and col_decim < -600000000",$dbh,$opt_loop_count);
+
+if ($limits->{'views'}){
+ test_query("agg_create_view",
+ "Time for agg_create_view",
+ "create view
+ reportview(col_key,col_signed,col_date,col_decim,
+ col_name,col_code,col_int) as
+ select updates.col_key, updates.col_signed,
+ updates.col_date, updates.col_decim,
+ hundred.col_name, hundred.col_code,
+ hundred.col_int
+ from updates, hundred
+ where updates.col_key = hundred.col_key",$dbh,$opt_loop_count);
+
+ test_query("agg_subtotal_report",
+ "Time for agg_subtotal_report",
+ "select avg(col_signed), min(col_signed), max(col_signed),
+ max(col_date), min(col_date),
+ count(distinct col_name), count(col_name),
+ col_code, col_int
+ from reportview
+ where col_decim >980000000
+ group by col_code, col_int",$dbh,$opt_loop_count);
+
+
+ test_query("agg_total_report",
+ "Time for agg_total_report",
+ "select avg(col_signed), min(col_signed), max(col_signed),
+ max(col_date), min(col_date),
+ count(distinct col_name), count(col_name),
+ count(col_code), count(col_int)
+ from reportview
+ where col_decim >980000000",$dbh,$opt_loop_count);
+}else{
+ print "agg_create_view - Failed\n\n";
+ print "agg_subtotal_report - Failed\n\n";
+ print "agg_total_report - Failed\n\n";
+}
+
+#fix from here
+test_query("join_2_cl",
+ "Time for join_2_cl",
+ "select uniques.col_signed, uniques.col_name,
+ hundred.col_signed, hundred.col_name
+ from uniques, hundred
+ where uniques.col_key = hundred.col_key
+ and uniques.col_key =1000"
+ ,$dbh,$opt_loop_count);
+
+test_query("join_2",
+ "Time for join_2",
+ "select uniques.col_signed, uniques.col_name,
+ hundred.col_signed, hundred.col_name
+ from uniques, hundred
+ where uniques.col_address = hundred.col_address
+ and uniques.col_address = 'SILICON VALLEY'"
+ ,$dbh,$opt_loop_count);
+
+test_query("sel_variable_select_low",
+ "Time for sel_variable_select_low",
+ "select col_key, col_int, col_signed, col_code,
+ col_double, col_name
+ from tenpct
+ where col_signed < -500000000"
+ ,$dbh,$opt_loop_count);
+
+test_query("sel_variable_select_high",
+ "Time for sel_variable_select_high",
+ "select col_key, col_int, col_signed, col_code,
+ col_double, col_name
+ from tenpct
+ where col_signed < -250000000"
+ ,$dbh,$opt_loop_count);
+
+test_query("join_4_cl",
+ "Time for join_4_cl",
+ "select uniques.col_date, hundred.col_date,
+ tenpct.col_date, updates.col_date
+ from uniques, hundred, tenpct, updates
+ where uniques.col_key = hundred.col_key
+ and uniques.col_key = tenpct.col_key
+ and uniques.col_key = updates.col_key
+ and uniques.col_key = 1000"
+ ,$dbh,$opt_loop_count);
+
+test_query("proj_100",
+ "Time for proj_100",
+ "select distinct col_address, col_signed from hundred"
+ ,$dbh,$opt_loop_count);
+
+test_query("join_4_ncl",
+ "Time for join_4_ncl",
+ "select uniques.col_date, hundred.col_date,
+ tenpct.col_date, updates.col_date
+ from uniques, hundred, tenpct, updates
+ where uniques.col_code = hundred.col_code
+ and uniques.col_code = tenpct.col_code
+ and uniques.col_code = updates.col_code
+ and uniques.col_code = 'BENCHMARKS'"
+ ,$dbh,$opt_loop_count);
+
+test_query("proj_10pct",
+ "Time for proj_10pct",
+ "select distinct col_signed from tenpct"
+ ,$dbh,$opt_loop_count);
+
+test_query("sel_1_ncl",
+ "Time for sel_1_ncl",
+ "select col_key, col_int, col_signed, col_code,
+ col_double, col_name
+ from updates where col_code = 'BENCHMARKS'"
+ ,$dbh,$opt_loop_count);
+
+test_query("join_2_ncl",
+ "Time for join_2_ncl",
+ "select uniques.col_signed, uniques.col_name,
+ hundred.col_signed, hundred.col_name
+ from uniques, hundred
+ where uniques.col_code = hundred.col_code
+ and uniques.col_code = 'BENCHMARKS'"
+ ,$dbh,$opt_loop_count);
+
+if ($limits->{'foreign_key'}){
+ do_many($dbh,$server->create("integrity_temp",\@fields,[]));
+
+ test_query("integrity_test_1",
+ "Time for integrity_test",
+ "insert into integrity_temp select *
+ from hundred where col_int=0",$dbh,$opt_loop_count);
+
+ test_query("integrity_test_2",
+ "Time for integrity_test",
+ "update hundred set col_signed = '-500000000'
+ where col_int = 0",$dbh,$opt_loop_count);
+
+ test_query("integrity_test_3",
+ "Time for integrity_test",
+ "update hundred set col_signed = '-500000000'
+ where col_int = 0",$dbh,$opt_loop_count);
+
+
+}else{
+ print "integrity_test - Failed\n\n";
+}
+
+push @drop_seq_command,$server->drop_index("updates","updates_int_bt");
+push @drop_seq_command,$server->drop_index("updates","updates_double_bt");
+push @drop_seq_command,$server->drop_index("updates","updates_decim_bt");
+push @drop_seq_command,$server->drop_index("updates","updates_code_h");
+
+test_many_command("Drop updates keys",
+ "Time for drop_updates_keys",
+ \@drop_seq_command,$dbh,$opt_loop_count);
+
+do_many($dbh,$server->create("saveupdates",\@fields,[]));
+
+test_command("bulk_save",
+ "Time for bulk_save",
+ "insert into saveupdates select *
+ from updates where col_key between 5000 and 5999"
+ ,$dbh,$opt_loop_count);
+
+test_command("bulk_modify",
+ "Time for bulk_modify",
+ "update updates
+ set col_key = col_key - 100000
+ where col_key between 5000 and 5999"
+ ,$dbh,$opt_loop_count);
+
+safe_command("upd_append_duplicate",
+ "Time for upd_append_duplicate",
+ "insert into updates
+ values (6000, 0, 60000, 39997.90,
+ 50005.00, 50005.00,
+ '11/10/1985', 'CONTROLLER',
+ 'ALICE IN WONDERLAND',
+ 'UNIVERSITY OF ILLINOIS AT CHICAGO')"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_remove_duplicate",
+ "Time for upd_remove_duplicate",
+ "delete from updates where col_key = 6000 and col_int = 0"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_app_t_mid",
+ "Time for upd_app_t_mid",
+ "insert into updates
+ values (5005, 5005, 50005, 50005.00, 50005.00,
+ 50005.00, '1/1/1988', 'CONTROLLER',
+ 'ALICE IN WONDERLAND',
+ 'UNIVERSITY OF ILLINOIS AT CHICAGO')"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_mod_t_mid",
+ "Time for upd_mod_t_mid",
+ "update updates set col_key = '-5000'
+ where col_key = 5005"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_del_t_mid",
+ "Time for upd_del_t_mid",
+ "delete from updates
+ where (col_key='5005') or (col_key='-5000')"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_app_t_end",
+ "Time for upd_app_t_end",
+ "delete from updates
+ where (col_key='5005') or (col_key='-5000')"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_mod_t_end",
+ "Time for upd_mod_t_end",
+ "update updates
+ set col_key = -1000
+ where col_key = 1000000001"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_del_t_end",
+ "Time for upd_del_t_end",
+ "delete from updates where col_key = -1000"
+ ,$dbh,$opt_loop_count);
+
+test_command("create_idx_updates_code_h",
+ "time for create_idx_updates_code_h",
+ "create index updates_code_h on updates (col_code)",
+ $dbh,$opt_loop_count);
+
+test_command("upd_app_t_mid",
+ "Time for upd_app_t_mid",
+ "insert into updates
+ values (5005, 5005, 50005, 50005.00, 50005.00,
+ 50005.00, '1/1/1988', 'CONTROLLER',
+ 'ALICE IN WONDERLAND',
+ 'UNIVERSITY OF ILLINOIS AT CHICAGO')"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_mod_t_cod",
+ "Time for upd_mod_t_cod",
+ "update updates
+ set col_code = 'SQL+GROUPS'
+ where col_key = 5005"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_del_t_mid",
+ "Time for upd_del_t_mid",
+ "delete from updates
+ where (col_key='5005') or (col_key='-5000')"
+ ,$dbh,$opt_loop_count);
+
+test_command("create_idx_updates_int_bt",
+ "time for create_idx_updates_int_bt",
+ "create index updates_int_bt on updates (col_int)",
+ $dbh,$opt_loop_count);
+
+test_command("upd_app_t_mid",
+ "Time for upd_app_t_mid",
+ "insert into updates
+ values (5005, 5005, 50005, 50005.00, 50005.00,
+ 50005.00, '1/1/1988', 'CONTROLLER',
+ 'ALICE IN WONDERLAND',
+ 'UNIVERSITY OF ILLINOIS AT CHICAGO')"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_mod_t_int",
+ "Time for upd_mod_t_int",
+ "update updates set col_int = 50015 where col_key = 5005"
+ ,$dbh,$opt_loop_count);
+
+test_command("upd_del_t_mid",
+ "Time for upd_del_t_mid",
+ "delete from updates
+ where (col_key='5005') or (col_key='-5000')"
+ ,$dbh,$opt_loop_count);
+
+test_command("bulk_append",
+ "Time for bulk_append",
+ "insert into updates select * from saveupdates"
+ ,$dbh,$opt_loop_count);
+
+test_command("bulk_delete",
+ "Time for bulk_delete",
+ "delete from updates where col_key < 0"
+ ,$dbh,$opt_loop_count);
+
+################################ END ###################################
+####
+#### End of the test...Finally print time used to execute the
+#### whole test.
+
+$dbh->disconnect;
+
+end_benchmark($start_time);
+
+############################ HELP FUNCTIONS ##############################
+
+sub test_query
+{
+ my($test_text,$result_text,$query,$dbh,$count)=@_;
+ my($i,$loop_time,$end_time);
+
+ print $test_text . "\n";
+ $loop_time=new Benchmark;
+ for ($i=0 ; $i < $count ; $i++)
+ {
+ defined(fetch_all_rows($dbh,$query)) or warn $DBI::errstr;
+ }
+ $end_time=new Benchmark;
+ print $result_text . "($count): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+}
+
+
+sub test_command
+{
+ my($test_text,$result_text,$query,$dbh,$count)=@_;
+ my($i,$loop_time,$end_time);
+
+ print $test_text . "\n";
+ $loop_time=new Benchmark;
+ for ($i=0 ; $i < $count ; $i++)
+ {
+ $dbh->do($query) or die $DBI::errstr;
+ }
+ $end_time=new Benchmark;
+ print $result_text . "($count): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+}
+
+sub safe_command
+{
+ my($test_text,$result_text,$query,$dbh,$count)=@_;
+ my($i,$loop_time,$end_time);
+
+ print $test_text . "\n";
+ $loop_time=new Benchmark;
+ for ($i=0 ; $i < $count ; $i++)
+ {
+ safe_do_many($dbh,$query);
+ }
+ $end_time=new Benchmark;
+ print $result_text . "($count): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+}
+
+sub test_many_command
+{
+ my($test_text,$result_text,$query,$dbh,$count)=@_;
+ my($i,$loop_time,$end_time);
+
+ $loop_time=new Benchmark;
+ for ($i=0 ; $i < $count ; $i++)
+ {
+ safe_do_many($dbh, @$query);
+ }
+ $end_time=new Benchmark;
+ print $result_text . "($count): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+}
+
+
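
The helper subs at the end of as3ap.sh (test_query, test_command, safe_command, test_many_command) all follow the same pattern: print the test name, run the statement the requested number of times, and report the elapsed time with Benchmark::timediff. Below is a minimal stand-alone sketch of that pattern outside the sql-bench harness; the DSN, credentials and table are placeholder assumptions, only the timing structure mirrors the script.

#!/usr/bin/perl
# Stand-alone sketch of the timing wrapper that test_command()/test_query()
# implement above; not part of sql-bench.  The DSN, credentials and table
# are placeholder assumptions.
use strict;
use warnings;
use DBI;
use Benchmark qw(timediff timestr);

sub timed_do
{
  my ($dbh, $name, $statement, $count) = @_;
  print "$name\n";
  my $start = Benchmark->new;
  for my $i (1 .. $count)
  {
    # Abort on error, the same way test_command() does
    $dbh->do($statement) or die $DBI::errstr;
  }
  my $end = Benchmark->new;
  print "Time for $name ($count): ",
        timestr(timediff($end, $start), "all"), "\n\n";
}

my $dbh = DBI->connect("DBI:mysql:test", "benchuser", "benchpass")
  or die $DBI::errstr;
$dbh->do("drop table if exists tenpct");
$dbh->do("create table tenpct (col_key int not null, col_code char(10) not null)");
timed_do($dbh, "create_idx_tenpct_key_code_bt",
         "create index tenpct_key_code_bt on tenpct (col_key,col_code)", 1);
$dbh->disconnect;
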
diff --git a/sql-bench/run-all-tests.sh b/sql-bench/run-all-tests.sh
index c2f687a9375..eb07d85be03 100644
--- a/sql-bench/run-all-tests.sh
+++ b/sql-bench/run-all-tests.sh
@@ -195,6 +195,10 @@ while (<test-*>)
print "Summary for $prog: ", join(" ",@prog_sum), "\n";
}
}
+ elsif ($last_line =~ /^Test skipped/i)
+ {
+ print "$last_line\n";
+ }
else
{
$errors++;
diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh
index 4dd762a804b..1e755be8f43 100644
--- a/sql-bench/server-cfg.sh
+++ b/sql-bench/server-cfg.sh
@@ -121,7 +121,7 @@ sub new
$self->{'double_quotes'} = 1; # Can handle: 'Walker''s'
$self->{'vacuum'} = 1; # When using with --fast
$self->{'drop_attr'} = "";
- $self->{'transactions'} = 1; # Transactions enabled
+ $self->{'transactions'} = 0; # Transactions disabled by default
$limits{'NEG'} = 1; # Supports -id
$limits{'alter_add_multi_col'}= 1; #Have ALTER TABLE t add a int,add b int;
@@ -197,12 +197,14 @@ sub new
$main::opt_create_options =~ /type=innodb/i)
{
$limits{'max_text_size'} = 8000; # Limit in Innobase
+ $self->{'transactions'} = 1; # Transactions enabled
}
if (defined($main::opt_create_options) &&
$main::opt_create_options =~ /type=gemini/i)
{
$limits{'working_blobs'} = 0; # Blobs not implemented yet
$limits{'max_tables'} = 500;
+ $self->{'transactions'} = 1; # Transactions enabled
}
return $self;
@@ -1333,6 +1335,14 @@ sub query {
return $sql;
}
+sub fix_for_insert
+{
+ my ($self,$cmd) = @_;
+ $cmd =~ s/\'\'/\' \'/g;
+ return $cmd;
+}
+
+
sub drop_index
{
my ($self,$table,$index) = @_;
@@ -1556,6 +1566,14 @@ sub query {
return $sql;
}
+sub fix_for_insert
+{
+ my ($self,$cmd) = @_;
+ $cmd =~ s/\'\'/\' \'/g;
+ return $cmd;
+}
+
+
sub drop_index
{
my ($self,$table,$index) = @_;
@@ -1789,6 +1807,16 @@ sub query {
return $sql;
}
+
+sub fix_for_insert
+{
+ my ($self,$cmd) = @_;
+ $cmd =~ s/\\'//g;
+ return $cmd;
+}
+
+
+
sub drop_index
{
my ($self,$table,$index) = @_;
diff --git a/sql-bench/test-ATIS.sh b/sql-bench/test-ATIS.sh
index fa66dadd03e..da866f2eb66 100644
--- a/sql-bench/test-ATIS.sh
+++ b/sql-bench/test-ATIS.sh
@@ -125,8 +125,7 @@ if (!$opt_skip_create)
chomp;
next unless ( $_ =~ /\w/ ); # skip blank lines
my $command = $insert_start . $_ . ")";
- $command =~ s/\'\'/\' \'/g if ($opt_server =~ /empress/i || $opt_server =~ /oracle/i);
- $command =~ s/\\'//g if ($opt_server =~ /informix/i);
+ $command = $server->fix_for_insert($command);
print "$command\n" if ($opt_debug);
$command =~ s/\\'/\'\'/g if ($double_quotes);
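
The server-cfg.sh and test-ATIS.sh hunks above move the per-server quoting tweaks (Empress/Oracle turn '' into ' ', Informix strips backslash-escaped quotes) out of the test script and into a fix_for_insert() method on the server object, so callers no longer need to inspect $opt_server themselves. Note that as3ap.sh above still appears to call it as $server->fix_to_insert() with =~, so that call presumably does not go through these methods yet. A minimal sketch of the dispatch idea follows; the package names are illustrative stand-ins, not the real server-cfg.sh classes, and only the fix_for_insert() bodies mirror the patch.

# Sketch only: illustrative packages, not the real server-cfg.sh classes.
use strict;
use warnings;

package EmpressLike;
sub new { return bless {}, shift }
sub fix_for_insert
{
  my ($self, $cmd) = @_;
  $cmd =~ s/\'\'/\' \'/g;       # Empress/Oracle: turn '' into ' '
  return $cmd;
}

package InformixLike;
sub new { return bless {}, shift }
sub fix_for_insert
{
  my ($self, $cmd) = @_;
  $cmd =~ s/\\'//g;             # Informix: drop backslash-escaped quotes
  return $cmd;
}

package main;
my $server  = EmpressLike->new;   # server-cfg.sh picks the class from --server
my $command = "insert into flight values ('','RV3')";
$command = $server->fix_for_insert($command);
print "$command\n";               # prints: insert into flight values (' ','RV3')
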
diff --git a/sql-bench/test-transactions.sh b/sql-bench/test-transactions.sh
new file mode 100644
index 00000000000..62de009ab33
--- /dev/null
+++ b/sql-bench/test-transactions.sh
@@ -0,0 +1,268 @@
+#!@PERL@
+#
+# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Library General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Library General Public License for more details.
+#
+# You should have received a copy of the GNU Library General Public
+# License along with this library; if not, write to the Free
+# Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+# MA 02111-1307, USA
+#
+# Test of transactions performance.
+#
+
+##################### Standard benchmark inits ##############################
+
+use DBI;
+use Benchmark;
+use warnings;
+
+$opt_groups=27; # Characters are 'A' -> Z
+
+$opt_loop_count=100000; # Change this to make test harder/easier
+$opt_medium_loop_count=100; # Change this to make test harder/easier
+
+chomp($pwd = `pwd`); $pwd = "." if ($pwd eq '');
+require "$pwd/bench-init.pl" || die "Can't read Configuration file: $!\n";
+
+# Avoid warnings for variables in bench-init.pl
+our ($opt_small_test, $opt_small_tables, $opt_debug, $opt_force);
+
+if ($opt_small_test || $opt_small_tables)
+{
+ $opt_loop_count/=100;
+ $opt_medium_loop_count/=10;
+}
+
+
+if (!$server->{transactions} && !$opt_force)
+{
+ print "Test skipped because the database doesn't support transactions\n";
+ exit(0);
+}
+
+####
+#### Connect and start timeing
+####
+
+$start_time=new Benchmark;
+$dbh = $server->connect();
+
+###
+### Create Table
+###
+
+print "Creating tables\n";
+$dbh->do("drop table bench1");
+$dbh->do("drop table bench2");
+
+do_many($dbh,$server->create("bench1",
+ ["idn int NOT NULL",
+ "rev_idn int NOT NULL",
+ "region char(1) NOT NULL",
+ "grp int NOT NULL",
+ "updated tinyint NOT NULL"],
+ ["primary key (idn)",
+ "unique (region,grp)"]));
+do_many($dbh,$server->create("bench2",
+ ["idn int NOT NULL",
+ "rev_idn int NOT NULL",
+ "region char(1) NOT NULL",
+ "grp int NOT NULL",
+ "updated tinyint NOT NULL"],
+ ["primary key (idn)",
+ "unique (region,grp)"]));
+
+$dbh->{AutoCommit} = 0;
+
+###
+### Test insert perfomance
+###
+
+test_insert("bench1","insert_commit",0);
+test_insert("bench2","insert_autocommit",1);
+
+sub test_insert
+{
+ my ($table, $test_name, $auto_commit)= @_;
+ my ($loop_time,$end_time,$id,$rev_id,$grp,$region);
+
+ $dbh->{AutoCommit}= $auto_commit;
+ $loop_time=new Benchmark;
+
+ for ($id=0,$rev_id=$opt_loop_count-1 ; $id < $opt_loop_count ;
+ $id++,$rev_id--)
+ {
+ $grp=$id/$opt_groups;
+ $region=chr(65+$id%$opt_groups);
+ do_query($dbh,"insert into $table values ($id,$rev_id,'$region',$grp,0)");
+ }
+
+ $dbh->commit if (!$auto_commit);
+ $end_time=new Benchmark;
+ print "Time for $test_name ($opt_loop_count): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+}
+
+###
+### Test update perfomance
+###
+
+test_update("bench1","update_commit",0);
+test_update("bench2","update_autocommit",1);
+
+sub test_update
+{
+ my ($table, $test_name, $auto_commit)= @_;
+ my ($loop_time,$end_time,$id);
+
+ $dbh->{AutoCommit}= $auto_commit;
+ $loop_time=new Benchmark;
+
+ for ($id=0 ; $id < $opt_loop_count ; $id++)
+ {
+ do_query($dbh,"update bench1 set updated=1 where idn=$id");
+ }
+
+ $dbh->commit if (!$auto_commit);
+ $end_time=new Benchmark;
+ print "Time for $test_name ($opt_loop_count): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+}
+
+###
+### Test delete perfomance
+###
+
+test_delete("bench1","delete_commit",0);
+test_delete("bench2","delete_autocommit",1);
+
+sub test_delete
+{
+ my ($table, $test_name, $auto_commit)= @_;
+ my ($loop_time,$end_time,$id);
+
+ $dbh->{AutoCommit}= $auto_commit;
+ $loop_time=new Benchmark;
+
+ for ($id=0 ; $id < $opt_loop_count ; $id++)
+ {
+ do_query($dbh,"delete from $table where idn=$id");
+ }
+ $dbh->commit if (!$auto_commit);
+ $end_time=new Benchmark;
+ print "Time for $test_name ($opt_loop_count): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+}
+
+###
+### Test rollback performance
+###
+
+print "Test transactions rollback performance\n" if($opt_debug);
+
+##
+## Insert rollback test
+##
+
+#
+# Test is done by inserting 10 rows in a table with lots of rows and
+# then doing a rollback on these
+#
+
+{
+ my ($id,$rev_id,$grp,$region,$end,$loop_time,$end_time,$commit_loop,$count);
+
+ $dbh->{AutoCommit} = 0;
+ $loop_time=new Benchmark;
+ $end=$opt_loop_count*2;
+ $count=0;
+
+ for ($commit_loop=1, $id=$opt_loop_count ; $id < $end ;
+ $id++, $commit_loop++)
+ {
+ $rev_id=$end-$id;
+ $grp=$id/$opt_groups;
+ $region=chr(65+$id%$opt_groups);
+ do_query($dbh,"insert into bench1 values ($id,$rev_id,'$region',$grp,0)");
+ if ($commit_loop >= $opt_medium_loop_count)
+ {
+ $dbh->rollback;
+ $commit_loop=0;
+ $count++;
+ }
+ }
+ if ($commit_loop > 1)
+ {
+ $dbh->rollback;
+ $count++;
+ }
+ $end_time=new Benchmark;
+ print "Time for insert_rollback ($count:$opt_loop_count): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+}
+
+###
+### Update rollback test
+###
+
+if (0)
+{
+
+ $dbh->{AutoCommit} = 0;
+
+ $loop_time=new Benchmark;
+
+ for ($id=0,$rev_id=$opt_medium_loop_count ; $id < $opt_medium_loop_count; $id++,$rev_id--)
+ {
+ $grp=$id/$opt_groups;
+ $region=chr(65+$id%$opt_groups);
+ do_query($dbh,"update bench1 set region='$region',grp=$grp where idn=$id");
+ }
+
+ $dbh->rollback;
+
+ $end_time=new Benchmark;
+ print "Time for update rollback (" . ($opt_medium_loop_count) . "): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+
+### Delete rollback test
+ $dbh->{AutoCommit} = 0;
+
+ $loop_time=new Benchmark;
+
+ for ($id=0,$rev_id=$opt_medium_loop_count; $id < $opt_medium_loop_count; $id++,$rev_id--)
+ {
+ $grp=$id/$opt_groups;
+ $region=chr(65+$id%$opt_groups);
+ do_query($dbh,"delete from bench1 where idn=$id");
+ }
+
+ $dbh->rollback;
+
+ $end_time=new Benchmark;
+ print "Time for delete rollback (" . ($opt_medium_loop_count) . "): " .
+ timestr(timediff($end_time, $loop_time),"all") . "\n\n";
+
+ $dbh->{AutoCommit} = 1;
+}
+
+
+####
+#### End of benchmark
+####
+
+$sth = $dbh->do("drop table bench1" . $server->{'drop_attr'}) or die $DBI::errstr;
+$sth = $dbh->do("drop table bench2" . $server->{'drop_attr'}) or die $DBI::errstr;
+
+$dbh->disconnect; # close connection
+end_benchmark($start_time);
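
test-transactions.sh drives everything through DBI and toggles AutoCommit per test, so the same insert/update/delete loop is timed once inside a single explicit transaction (commit at the end) and once with an implicit commit per statement; server-cfg.sh above only reports transaction support when the tables are created as InnoDB or Gemini (via the --create-options value it inspects), otherwise the script prints the "Test skipped" line that run-all-tests.sh now recognizes. Below is a minimal self-contained sketch of the AutoCommit timing pattern outside the harness; the DSN, credentials and table layout are placeholder assumptions.

#!/usr/bin/perl
# Stand-alone sketch of the commit vs. autocommit timing pattern that
# test_insert()/test_update()/test_delete() above use; not the sql-bench
# script itself.  DSN, credentials and table layout are placeholder
# assumptions; type=innodb matches what server-cfg.sh checks for.
use strict;
use warnings;
use DBI;
use Benchmark qw(timediff timestr);

my $dbh = DBI->connect("DBI:mysql:test", "benchuser", "benchpass",
                       { RaiseError => 1, AutoCommit => 1 });
for my $table ("bench1", "bench2")
{
  $dbh->do("drop table if exists $table");
  $dbh->do("create table $table (idn int not null, updated tinyint not null,"
           . " primary key (idn)) type=innodb");
}

sub timed_inserts
{
  my ($table, $name, $auto_commit, $rows) = @_;
  $dbh->{AutoCommit} = $auto_commit;
  my $start = Benchmark->new;
  for my $id (0 .. $rows - 1)
  {
    $dbh->do("insert into $table values ($id, 0)");
  }
  $dbh->commit if (!$auto_commit);     # one commit for the whole batch
  my $end = Benchmark->new;
  print "Time for $name ($rows): ",
        timestr(timediff($end, $start), "all"), "\n\n";
}

timed_inserts("bench1", "insert_commit", 0, 1000);      # one transaction
timed_inserts("bench2", "insert_autocommit", 1, 1000);  # commit per statement
$dbh->disconnect;

The two variants differ only in where the commit happens, which is exactly the per-statement commit overhead the insert_commit/insert_autocommit figures are meant to expose.
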
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 91ec8826f73..86cc3283955 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -2164,10 +2164,7 @@ bool Item_func_match::eq(const Item *item) const
double Item_func_match::val()
{
if (ft_handler==NULL)
- init_search(1);
-
- if ((null_value= (ft_handler==NULL)))
- return 0.0;
+ return -1.0;
if (join_key)
{
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index d903651e81f..2774e2a4050 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -47,7 +47,6 @@ char pstack_file_name[80];
#if defined(HAVE_DEC_3_2_THREADS) || defined(SIGNALS_DONT_BREAK_READ)
#define HAVE_CLOSE_SERVER_SOCK 1
-void close_server_sock();
#endif
extern "C" { // Because of SCO 3.2V4.2
@@ -207,7 +206,7 @@ SHOW_COMP_OPTION have_openssl=SHOW_OPTION_NO;
SHOW_COMP_OPTION have_symlink=SHOW_OPTION_YES;
-static bool opt_skip_slave_start = 0; // if set, slave is not autostarted
+static bool opt_skip_slave_start = 0; // If set, slave is not autostarted
static bool opt_do_pstack = 0;
static ulong opt_specialflag=SPECIAL_ENGLISH;
static ulong back_log,connect_timeout,concurrency;
@@ -229,8 +228,10 @@ int segfaulted = 0; // ensure we do not enter SIGSEGV handler twice
extern MASTER_INFO glob_mi;
extern int init_master_info(MASTER_INFO* mi);
-// if sql_bin_update is true, SQL_LOG_UPDATE and SQL_LOG_BIN are kept in sync,
-// and are treated as aliases for each other
+/*
+ If sql_bin_update is true, SQL_LOG_UPDATE and SQL_LOG_BIN are kept in sync,
+ and are treated as aliases for each other
+*/
static bool kill_in_progress=FALSE;
static struct rand_struct sql_rand;
@@ -385,6 +386,7 @@ static char *get_relative_path(const char *path);
static void fix_paths(void);
static pthread_handler_decl(handle_connections_sockets,arg);
static int bootstrap(FILE *file);
+static void close_server_sock();
static bool read_init_file(char *file_name);
#ifdef __NT__
static pthread_handler_decl(handle_connections_namedpipes,arg);
@@ -454,9 +456,7 @@ static void close_connections(void)
if (error != 0 && !count++)
sql_print_error("Got error %d from pthread_cond_timedwait",error);
#endif
-#if defined(HAVE_DEC_3_2_THREADS) || defined(SIGNALS_DONT_BREAK_READ)
close_server_sock();
-#endif
}
(void) pthread_mutex_unlock(&LOCK_thread_count);
#endif /* __WIN__ */
@@ -570,28 +570,31 @@ static void close_connections(void)
DBUG_VOID_RETURN;
}
-#ifdef HAVE_CLOSE_SERVER_SOCK
-void close_server_sock()
+static void close_server_sock()
{
+#ifdef HAVE_CLOSE_SERVER_SOCK
DBUG_ENTER("close_server_sock");
- if (ip_sock != INVALID_SOCKET)
+ my_socket tmp_sock;
+ tmp_sock=ip_sock;
+ if (tmp_sock != INVALID_SOCKET)
{
- DBUG_PRINT("info",("closing TCP/IP socket"));
- VOID(shutdown(ip_sock,2));
- VOID(closesocket(ip_sock));
ip_sock=INVALID_SOCKET;
+ DBUG_PRINT("info",("closing TCP/IP socket"));
+ VOID(shutdown(tmp_sock,2));
+ VOID(closesocket(tmp_sock));
}
- if (unix_sock != INVALID_SOCKET)
+ tmp_sock=unix_sock;
+ if (tmp_sock != INVALID_SOCKET)
{
+ unix_sock=INVALID_SOCKET;
DBUG_PRINT("info",("closing Unix socket"));
- VOID(shutdown(unix_sock,2));
- VOID(closesocket(unix_sock));
+ VOID(shutdown(tmp_sock,2));
+ VOID(closesocket(tmp_sock));
VOID(unlink(mysql_unix_port));
- unix_sock=INVALID_SOCKET;
}
DBUG_VOID_RETURN;
-}
#endif
+}
void kill_mysql(void)
{
@@ -607,10 +610,12 @@ void kill_mysql(void)
{
DBUG_PRINT("error",("Got error: %ld from SetEvent",GetLastError()));
}
- // or:
- // HANDLE hEvent=OpenEvent(0, FALSE, "MySqlShutdown");
- // SetEvent(hEventShutdown);
- // CloseHandle(hEvent);
+ /*
+ or:
+ HANDLE hEvent=OpenEvent(0, FALSE, "MySqlShutdown");
+ SetEvent(hEventShutdown);
+ CloseHandle(hEvent);
+ */
}
#elif defined(OS2)
pthread_cond_signal( &eventShutdown); // post semaphore
@@ -647,8 +652,7 @@ static void __cdecl kill_server(int sig_ptr)
int sig=(int) (long) sig_ptr; // This is passed a int
DBUG_ENTER("kill_server");
- // if there is a signal during the kill in progress, we do not need
- // another one
+ // if there is a signal during the kill in progress, ignore the other
if (kill_in_progress) // Safety
RETURN_FROM_KILL_SERVER;
kill_in_progress=TRUE;
@@ -1207,10 +1211,12 @@ static void start_signal_handler(void)
static sig_handler handle_segfault(int sig)
{
THD *thd=current_thd;
- // strictly speaking, one needs a mutex here
- // but since we have got SIGSEGV already, things are a mess
- // so not having the mutex is not as bad as possibly using a buggy
- // mutex - so we keep things simple
+ /*
+ Strictly speaking, one needs a mutex here
+ but since we have got SIGSEGV already, things are a mess
+ so not having the mutex is not as bad as possibly using a buggy
+ mutex - so we keep things simple
+ */
if (segfaulted)
{
fprintf(stderr, "Fatal signal %d while backtracing\n", sig);
@@ -1559,8 +1565,10 @@ pthread_handler_decl(handle_shutdown,arg)
// close semaphore and kill server
pthread_cond_destroy( &eventShutdown);
- // exit main loop on main thread, so kill will be done from
- // main thread (this is thread 2)
+ /*
+ Exit main loop on main thread, so kill will be done from
+ main thread (this is thread 2)
+ */
abort_loop = 1;
// unblock select()
@@ -1584,8 +1592,9 @@ static void open_log(MYSQL_LOG *log, const char *hostname,
char tmp[FN_REFLEN];
if (!opt_name || !opt_name[0])
{
- /* TODO: The following should be using fn_format(); We just need to
- first change fn_format() to cut the file name if it's too long.
+ /*
+ TODO: The following should be using fn_format(); We just need to
+ first change fn_format() to cut the file name if it's too long.
*/
strmake(tmp,hostname,FN_REFLEN-5);
strmov(strcend(tmp,'.'),extension);
@@ -2136,9 +2145,11 @@ int main(int argc, char **argv)
}
}
- // This is a WIN95 machine or a start of mysqld as a standalone program
- // we have to pass the arguments, in case of NT-service this will be done
- // by ServiceMain()
+ /*
+ This is a WIN95 machine or a start of mysqld as a standalone program
+ we have to pass the arguments, in case of NT-service this will be done
+ by ServiceMain()
+ */
Service.my_argc=argc;
Service.my_argv=argv;
@@ -2726,8 +2737,10 @@ static struct option long_options[] = {
{"master-ssl-cert", optional_argument, 0, (int) OPT_MASTER_SSL_CERT},
{"myisam-recover", optional_argument, 0, (int) OPT_MYISAM_RECOVER},
{"memlock", no_argument, 0, (int) OPT_MEMLOCK},
- // needs to be available for the test case to pass in non-debugging mode
- // is a no-op
+ /*
+ Option needs to be available for the test case to pass in non-debugging
+ mode. is a no-op.
+ */
{"disconnect-slave-event-count", required_argument, 0,
(int) OPT_DISCONNECT_SLAVE_EVENT_COUNT},
{"abort-slave-event-count", required_argument, 0,
@@ -2762,8 +2775,7 @@ static struct option long_options[] = {
(int) OPT_REPLICATE_WILD_IGNORE_TABLE},
{"replicate-rewrite-db", required_argument, 0,
(int) OPT_REPLICATE_REWRITE_DB},
- // In replication, we may need to tell the other servers how to connect
- // to us
+ // In replication, we may need to tell the other servers how to connect
{"report-host", required_argument, 0, (int) OPT_REPORT_HOST},
{"report-user", required_argument, 0, (int) OPT_REPORT_USER},
{"report-password", required_argument, 0, (int) OPT_REPORT_PASSWORD},
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 7c5b56aa8b6..32318180081 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -2174,8 +2174,8 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name,
int setup_ftfuncs(THD *thd)
{
- List_iterator<Item_func_match> li(thd->lex.ftfunc_list),
- lj(thd->lex.ftfunc_list);
+ List_iterator<Item_func_match> li(thd->lex.select_lex.ftfunc_list),
+ lj(thd->lex.select_lex.ftfunc_list);
Item_func_match *ftf, *ftf2;
while ((ftf=li++))
@@ -2195,7 +2195,7 @@ int setup_ftfuncs(THD *thd)
int init_ftfuncs(THD *thd, bool no_order)
{
- List_iterator<Item_func_match> li(thd->lex.ftfunc_list);
+ List_iterator<Item_func_match> li(thd->lex.select_lex.ftfunc_list);
Item_func_match *ifm;
DBUG_PRINT("info",("Performing FULLTEXT search"));
thd->proc_info="FULLTEXT initialization";
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 464216d56ba..63e003178c2 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -1,24 +1,28 @@
/* Copyright (C) 2000 MySQL AB
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
/*
Delete of records and truncate of tables.
+
Multi-table deletes were introduced by Monty and Sinisa
*/
+
+
#include "mysql_priv.h"
#include "ha_innobase.h"
#include "sql_select.h"
@@ -75,7 +79,7 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order,
if (error)
DBUG_RETURN(-1);
if ((select && select->check_quick(test(thd->options & SQL_SAFE_UPDATES),
- limit)) ||
+ limit)) ||
!limit)
{
delete select;
@@ -125,8 +129,8 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order,
}
init_read_record(&info,thd,table,select,1,1);
- init_ftfuncs(thd,1);
deleted=0L;
+ init_ftfuncs(thd,1);
thd->proc_info="updating";
while (!(error=info.read_record(&info)) && !thd->killed)
{
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 20a645e52af..0eabd971027 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1718,7 +1718,7 @@ mysql_execute_command(void)
table_count)) && ! thd->fatal_error)
{
res=mysql_select(thd,tables,select_lex->item_list,
- select_lex->where,select_lex->ftfunc_list,
+ select_lex->where,
(ORDER *)NULL,(ORDER *)NULL,(Item *)NULL,
(ORDER *)NULL,
select_lex->options | thd->options |
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index acb29743356..abb1d891166 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -35,11 +35,10 @@ const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref",
"MAYBE_REF","ALL","range","index","fulltext" };
static bool make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
- DYNAMIC_ARRAY *keyuse,List<Item_func_match> &ftfuncs);
+ DYNAMIC_ARRAY *keyuse);
static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,
JOIN_TAB *join_tab,
- uint tables,COND *conds,table_map table_map,
- List<Item_func_match> &ftfuncs);
+ uint tables,COND *conds,table_map table_map);
static int sort_keyuse(KEYUSE *a,KEYUSE *b);
static void set_position(JOIN *join,uint index,JOIN_TAB *table,KEYUSE *key);
static void find_best_combination(JOIN *join,table_map rest_tables);
@@ -156,7 +155,6 @@ int handle_select(THD *thd, LEX *lex, select_result *result)
res=mysql_select(thd,(TABLE_LIST*) select_lex->table_list.first,
select_lex->item_list,
select_lex->where,
- select_lex->ftfunc_list,
(ORDER*) select_lex->order_list.first,
(ORDER*) select_lex->group_list.first,
select_lex->having,
@@ -177,7 +175,6 @@ int handle_select(THD *thd, LEX *lex, select_result *result)
int
mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
- List<Item_func_match> &ftfuncs,
ORDER *order, ORDER *group,Item *having,ORDER *proc_param,
ulong select_options,select_result *result)
{
@@ -220,7 +217,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
if (having->with_sum_func)
having->split_sum_func(all_fields);
}
- if (setup_ftfuncs(thd,tables,ftfuncs)) /* should be after having->fix_fields */
+ if (setup_ftfuncs(thd)) /* should be after having->fix_fields */
DBUG_RETURN(-1);
/*
Check if one one uses a not constant column with group functions
@@ -408,8 +405,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
/* Calculate how to do the join */
thd->proc_info="statistics";
- if (make_join_statistics(&join,tables,conds,&keyuse,ftfuncs) ||
- thd->fatal_error)
+ if (make_join_statistics(&join,tables,conds,&keyuse) || thd->fatal_error)
goto err;
thd->proc_info="preparing";
result->initialize_tables(&join);
@@ -532,7 +528,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
make_join_readinfo(&join,
(select_options & (SELECT_DESCRIBE |
SELECT_NO_JOIN_CACHE)) |
- (ftfuncs.elements ? SELECT_NO_JOIN_CACHE : 0));
+ (thd->lex.select_lex.ftfunc_list.elements ? SELECT_NO_JOIN_CACHE : 0));
/* Need to tell Innobase that to play it safe, it should fetch all
columns of the tables: this is because MySQL
@@ -599,18 +595,8 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
}
/* Perform FULLTEXT search before all regular searches */
- if (ftfuncs.elements)
- {
- List_iterator_fast<Item_func_match> li(ftfuncs);
- Item_func_match *ifm;
- DBUG_PRINT("info",("Performing FULLTEXT search"));
- thd->proc_info="FULLTEXT search init";
+ init_ftfuncs(thd,test(order));
- while ((ifm=li++))
- {
- ifm->init_search(test(order));
- }
- }
/* Create a tmp table if distinct or if the sort is too complicated */
if (need_tmp)
{
@@ -902,8 +888,7 @@ static ha_rows get_quick_record_count(SQL_SELECT *select,TABLE *table,
static bool
make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
- DYNAMIC_ARRAY *keyuse_array,
- List<Item_func_match> &ftfuncs)
+ DYNAMIC_ARRAY *keyuse_array)
{
int error;
uint i,table_count,const_count,found_ref,refs,key,const_ref,eq_part;
@@ -1015,7 +1000,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
if (conds || outer_join)
if (update_ref_and_keys(join->thd,keyuse_array,stat,join->tables,
- conds,~outer_join,ftfuncs))
+ conds,~outer_join))
DBUG_RETURN(1);
/* loop until no more const tables are found */
@@ -1524,8 +1509,7 @@ sort_keyuse(KEYUSE *a,KEYUSE *b)
static bool
update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
- uint tables, COND *cond, table_map normal_tables,
- List<Item_func_match> &ftfuncs)
+ uint tables, COND *cond, table_map normal_tables)
{
uint and_level,i,found_eq_constant;
@@ -1553,7 +1537,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
add_key_part(keyuse,field);
}
- if (ftfuncs.elements)
+ if (thd->lex.select_lex.ftfunc_list.elements)
{
add_ft_keys(keyuse,join_tab,cond,normal_tables);
}
@@ -1561,7 +1545,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
/*
** remove ref if there is a keypart which is a ref and a const.
** remove keyparts without previous keyparts.
- ** Special treatment for ft-keys. SerG.
+ ** Special treatment for ft-keys.
*/
if (keyuse->elements)
{
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index a6ea88d9adc..b2ffb97fa81 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -68,7 +68,6 @@ int mysql_union(THD *thd, LEX *lex,select_result *result)
res=mysql_select(thd, (TABLE_LIST*) sl->table_list.first,
sl->item_list,
sl->where,
- sl->ftfunc_list,
(sl->braces) ? (ORDER *) sl->order_list.first : (ORDER *) 0,
(ORDER*) sl->group_list.first,
sl->having,
@@ -127,7 +126,6 @@ int mysql_union(THD *thd, LEX *lex,select_result *result)
res=mysql_select(thd, (TABLE_LIST*) sl->table_list.first,
sl->item_list,
sl->where,
- sl->ftfunc_list,
(sl->braces) ? (ORDER *)sl->order_list.first : (ORDER *) 0,
(ORDER*) sl->group_list.first,
sl->having,
@@ -150,8 +148,12 @@ int mysql_union(THD *thd, LEX *lex,select_result *result)
/* Create a list of fields in the temporary table */
List_iterator<Item> it(item_list);
Field **field;
+#if 0
List<Item_func_match> ftfunc_list;
ftfunc_list.empty();
+#else
+ thd->lex.select_lex.ftfunc_list.empty();
+#endif
for (field=table->field ; *field ; field++)
{
@@ -170,7 +172,7 @@ int mysql_union(THD *thd, LEX *lex,select_result *result)
thd->options&= ~OPTION_FOUND_ROWS;
}
res=mysql_select(thd,&result_table_list,
- item_list, NULL, ftfunc_list, order,
+ item_list, NULL, /*ftfunc_list,*/ order,
(ORDER*) NULL, NULL, (ORDER*) NULL,
thd->options, result);
}
diff --git a/tools/mysqlmanager.c b/tools/mysqlmanager.c
index 896d889cdcc..aa645b609d8 100644
--- a/tools/mysqlmanager.c
+++ b/tools/mysqlmanager.c
@@ -330,7 +330,7 @@ LOG_MSG_FUNC(log_debug,LOG_DEBUG)
void log_debug(const char* __attribute__((unused)) fmt,...) {}
#endif
-static void handle_sigterm(int sig)
+static void handle_sigterm(int sig __attribute__((unused)))
{
log_info("Got SIGTERM");
if (!one_thread)