diff options

| field | value | |
|---|---|---|
| author | unknown <monty@donna.mysql.com> | 2000-12-24 15:19:00 +0200 |
| committer | unknown <monty@donna.mysql.com> | 2000-12-24 15:19:00 +0200 |
| commit | 78cf07c8ea126fc03706988de23893ca4680ac77 (patch) | |
| tree | 899e99579647137316d4fbc999750fb989b1cf2e /sql | |
| parent | b23a560f84a33ee28d630f6baf7b4f7848fa3db8 (diff) | |
| download | mariadb-git-78cf07c8ea126fc03706988de23893ca4680ac77.tar.gz | |
New benchmark test
Fixed bug in REPLACE with BDB tables
Prepare for write lock on read for BDB
Inform the handler when we want to use IGNORE / REPLACE
New manual pages
Docs/manual.texi:
Updates for BDB tables and new changes
client/mysql.cc:
Cleanup
configure.in:
Added sys/ioctl.h
heap/hp_rkey.c:
Fixed bug when reading next on not unique key
include/my_base.h:
Added new extra options
man/mysql.1:
Added example
mysys/my_write.c:
Safety fix
scripts/mysqlaccess.sh:
Removed debug output
scripts/safe_mysqld.sh:
Added --open-files-limit
sql-bench/Results/ATIS-mysql-Linux_2.2.14_my_SMP_i686:
Updated to new benchmark
sql-bench/Results/RUN-mysql-Linux_2.2.14_my_SMP_i686:
Updated to new benchmark
sql-bench/Results/alter-table-mysql-Linux_2.2.14_my_SMP_i686:
Updated to new benchmark
sql-bench/Results/big-tables-mysql-Linux_2.2.14_my_SMP_i686:
Updated to new benchmark
sql-bench/Results/connect-mysql-Linux_2.2.14_my_SMP_i686:
Updated to new benchmark
sql-bench/Results/create-mysql-Linux_2.2.14_my_SMP_i686:
Updated to new benchmark
sql-bench/Results/insert-mysql-Linux_2.2.14_my_SMP_i686:
Updated to new benchmark
sql-bench/Results/select-mysql-Linux_2.2.14_my_SMP_i686:
Updated to new benchmark
sql-bench/Results/wisconsin-mysql-Linux_2.2.14_my_SMP_i686:
Updated to new benchmark
sql-bench/bench-init.pl.sh:
Updated to new benchmark
sql-bench/server-cfg.sh:
Fixes for HEAP tables
sql-bench/test-ATIS.sh:
Fix for heap tables
sql-bench/test-insert.sh:
Added some ORDER BY benchmarks to test more things
sql/ha_berkeley.cc:
Fix a bug in REPLACE
sql/ha_berkeley.h:
Fix to handle lock_on_read
sql/mysql_priv.h:
Prepare for internal subtransactions in BDB
sql/mysqld.cc:
Added -O open_files_limit=#
sql/sql_insert.cc:
Inform the handler when we want to use IGNORE / REPLACE
sql/sql_load.cc:
Inform the handler when we want to use IGNORE / REPLACE
sql/sql_parse.cc:
Cleanup
sql/sql_show.cc:
Cleanup
sql/sql_table.cc:
Inform the handler when we want to use IGNORE / REPLACE
sql/sql_update.cc:
Inform the handler when we want to use IGNORE / REPLACE
support-files/binary-configure.sh:
Better message
Diffstat (limited to 'sql')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | sql/ha_berkeley.cc | 7 |
| -rw-r--r-- | sql/ha_berkeley.h | 6 |
| -rw-r--r-- | sql/mysql_priv.h | 3 |
| -rw-r--r-- | sql/mysqld.cc | 22 |
| -rw-r--r-- | sql/sql_insert.cc | 26 |
| -rw-r--r-- | sql/sql_load.cc | 5 |
| -rw-r--r-- | sql/sql_parse.cc | 5 |
| -rw-r--r-- | sql/sql_show.cc | 13 |
| -rw-r--r-- | sql/sql_table.cc | 4 |
| -rw-r--r-- | sql/sql_update.cc | 3 |

10 files changed, 71 insertions, 23 deletions
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 5f01d44e091..0db001783c1 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -76,7 +76,8 @@ const char *ha_berkeley_ext=".db"; bool berkeley_skip=0,berkeley_shared_data=0; -u_int32_t berkeley_init_flags= DB_PRIVATE, berkeley_lock_type=DB_LOCK_DEFAULT; +u_int32_t berkeley_init_flags= DB_PRIVATE | DB_RECOVER, berkeley_env_flags=0, + berkeley_lock_type=DB_LOCK_DEFAULT; ulong berkeley_cache_size; char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; long berkeley_lock_scan_time=0; @@ -125,6 +126,7 @@ bool berkeley_init(void) db_env->set_noticecall(db_env, berkeley_noticecall); db_env->set_tmp_dir(db_env, berkeley_tmpdir); db_env->set_data_dir(db_env, mysql_data_home); + db_env->set_flags(db_env, berkeley_env_flags, 1); if (berkeley_logdir) db_env->set_lg_dir(db_env, berkeley_logdir); @@ -790,6 +792,7 @@ int ha_berkeley::write_row(byte * record) error=file->put(file, transaction, create_key(&prim_key, primary_key, key_buff, record), &row, key_type[primary_key]); + last_dup_key=primary_key; } else { @@ -818,6 +821,8 @@ int ha_berkeley::write_row(byte * record) } } } + else + last_dup_key=primary_key; if (!error) { DBUG_PRINT("trans",("committing subtransaction")); diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h index b84a195e81c..1e32fdb8a7c 100644 --- a/sql/ha_berkeley.h +++ b/sql/ha_berkeley.h @@ -53,7 +53,8 @@ class ha_berkeley: public handler ulong alloced_rec_buff_length; ulong changed_rows; uint primary_key,last_dup_key, hidden_primary_key, version; - bool fixed_length_row, fixed_length_primary_key, key_read; + u_int32_t lock_on_read; + bool fixed_length_row, fixed_length_primary_key, key_read, using_ignore; bool fix_rec_buff_for_blob(ulong length); byte current_ident[BDB_HIDDEN_PRIMARY_KEY_LENGTH]; @@ -157,7 +158,8 @@ class ha_berkeley: public handler }; extern bool berkeley_skip, berkeley_shared_data; -extern u_int32_t berkeley_init_flags,berkeley_lock_type,berkeley_lock_types[]; +extern 
u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type, + berkeley_lock_types[]; extern ulong berkeley_cache_size, berkeley_max_lock; extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; extern long berkeley_lock_scan_time; diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 067e17c4356..c136d2948df 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -125,7 +125,7 @@ void kill_one_thread(THD *thd, ulong id); #define QUERY_PRIOR 6 #endif /* __WIN92__ */ - /* Bits fro testflag */ + /* Bits from testflag */ #define TEST_PRINT_CACHED_TABLES 1 #define TEST_NO_KEY_GROUP 2 #define TEST_MIT_THREAD 4 @@ -161,6 +161,7 @@ void kill_one_thread(THD *thd, ulong id); #define OPTION_BEGIN OPTION_NOT_AUTO_COMMIT*2 #define OPTION_QUICK OPTION_BEGIN*2 #define OPTION_QUOTE_SHOW_CREATE OPTION_QUICK*2 +#define OPTION_INTERNAL_SUBTRANSACTIONS OPTION_QUOTE_SHOW_CREATE*2 /* Set if we are updating a non-transaction safe table */ #define OPTION_STATUS_NO_TRANS_UPDATE OPTION_QUOTE_SHOW_CREATE*2 diff --git a/sql/mysqld.cc b/sql/mysqld.cc index ad5fc926afd..754ebc12766 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -203,7 +203,8 @@ ulong keybuff_size,sortbuff_size,max_item_sort_length,table_cache_size, thread_stack_min,net_wait_timeout,what_to_log= ~ (1L << (uint) COM_TIME), query_buff_size, lower_case_table_names, mysqld_net_retry_count, net_interactive_timeout, slow_launch_time = 2L, - net_read_timeout,net_write_timeout,slave_open_temp_tables=0; + net_read_timeout,net_write_timeout,slave_open_temp_tables=0, + open_files_limit=0; ulong thread_cache_size=0, binlog_cache_size=0, max_binlog_cache_size=0; volatile ulong cached_thread_count=0; @@ -1461,8 +1462,10 @@ int main(int argc, char **argv) { uint wanted_files=10+(uint) max(max_connections*5, max_connections+table_cache_size*2); + set_if_bigger(wanted_files, open_files_limit); + // Note that some system returns 0 if we succeed here: uint files=set_maximum_open_files(wanted_files); - if (files && files < 
wanted_files) // Some systems return 0 + if (files && files < wanted_files && ! open_files_limit) { max_connections= (ulong) min((files-10),max_connections); table_cache_size= (ulong) max((files-10-max_connections)/2,64); @@ -2225,7 +2228,7 @@ enum options { OPT_BDB_HOME, OPT_BDB_LOG, OPT_BDB_TMP, OPT_BDB_NOSYNC, OPT_BDB_LOCK, OPT_BDB_SKIP, - OPT_BDB_RECOVER, OPT_BDB_SHARED, + OPT_BDB_NO_RECOVER, OPT_BDB_SHARED, OPT_MASTER_HOST, OPT_MASTER_USER, OPT_MASTER_PASSWORD, OPT_MASTER_PORT, OPT_MASTER_INFO_FILE, @@ -2252,7 +2255,7 @@ static struct option long_options[] = { {"bdb-home", required_argument, 0, (int) OPT_BDB_HOME}, {"bdb-lock-detect", required_argument, 0, (int) OPT_BDB_LOCK}, {"bdb-logdir", required_argument, 0, (int) OPT_BDB_LOG}, - {"bdb-recover", no_argument, 0, (int) OPT_BDB_RECOVER}, + {"bdb-no-recover", no_argument, 0, (int) OPT_BDB_NO_RECOVER}, {"bdb-no-sync", no_argument, 0, (int) OPT_BDB_NOSYNC}, {"bdb-shared-data", no_argument, 0, (int) OPT_BDB_SHARED}, {"bdb-tmpdir", required_argument, 0, (int) OPT_BDB_TMP}, @@ -2463,6 +2466,8 @@ CHANGEABLE_VAR changeable_vars[] = { NET_READ_TIMEOUT, 1, 65535, 0, 1 }, { "net_write_timeout", (long*) &net_write_timeout, NET_WRITE_TIMEOUT, 1, 65535, 0, 1 }, + { "open_files_limit", (long*) &open_files_limit, + 0, 0, 65535, 0, 1}, { "query_buffer_size", (long*) &query_buff_size, 0, MALLOC_OVERHEAD, (long) ~0, MALLOC_OVERHEAD, IO_SIZE }, { "record_buffer", (long*) &my_default_record_cache_size, @@ -2543,6 +2548,7 @@ struct show_var_st init_vars[]= { {"net_read_timeout", (char*) &net_read_timeout, SHOW_LONG}, {"net_retry_count", (char*) &mysqld_net_retry_count, SHOW_LONG}, {"net_write_timeout", (char*) &net_write_timeout, SHOW_LONG}, + {"open_files_limit", (char*) &open_files_limit, SHOW_LONG}, {"pid_file", (char*) pidfile_name, SHOW_CHAR}, {"port", (char*) &mysql_port, SHOW_INT}, {"protocol_version", (char*) &protocol_version, SHOW_INT}, @@ -2744,7 +2750,7 @@ static void usage(void) (DEFAULT, OLDEST, RANDOM or YOUNGEST, 
# sec)\n\ --bdb-logdir=directory Berkeley DB log file directory\n\ --bdb-no-sync Don't synchronously flush logs\n\ - --bdb-recover Start Berkeley DB in recover mode\n\ + --bdb-no-recover Don't try to recover Berkeley DB tables on start\n\ --bdb-shared-data Start Berkeley DB in multi-process mode\n\ --bdb-tmpdir=directory Berkeley DB tempfile name\n\ --skip-bdb Don't use berkeley db (will save memory)\n\ @@ -3233,10 +3239,10 @@ static void get_options(int argc,char **argv) berkeley_home=optarg; break; case OPT_BDB_NOSYNC: - berkeley_init_flags|=DB_TXN_NOSYNC; + berkeley_env_flags|=DB_TXN_NOSYNC; break; - case OPT_BDB_RECOVER: - berkeley_init_flags|=DB_RECOVER; + case OPT_BDB_NO_RECOVER: + berkeley_init_flags&= ~(DB_RECOVER); break; case OPT_BDB_TMP: berkeley_tmpdir=optarg; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 13da095607d..5632e9be35a 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -190,6 +190,8 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, error=0; id=0; thd->proc_info="update"; + if (duplic == DUP_IGNORE || duplic == DUP_REPLACE) + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); while ((values = its++)) { if (fields.elements || !value_count) @@ -281,6 +283,8 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, List<Item> &fields, table->next_number_field=0; thd->count_cuted_fields=0; thd->next_insert_id=0; // Reset this if wrongly used + if (duplic == DUP_IGNORE || duplic == DUP_REPLACE) + table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); if (error) goto abort; @@ -1056,6 +1060,7 @@ bool delayed_insert::handle_inserts(void) { int error; uint max_rows; + bool using_ignore=0; DBUG_ENTER("handle_inserts"); /* Allow client to insert new rows */ @@ -1096,6 +1101,12 @@ bool delayed_insert::handle_inserts(void) table->time_stamp=row->time_stamp; info.handle_duplicates= row->dup; + if (info.handle_duplicates == DUP_IGNORE || + info.handle_duplicates == DUP_REPLACE) + { + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + 
using_ignore=1; + } thd.net.last_errno = 0; // reset error for binlog if (write_record(table,&info)) { @@ -1105,6 +1116,11 @@ bool delayed_insert::handle_inserts(void) pthread_mutex_unlock(&LOCK_delayed_status); row->log_query = 0; } + if (using_ignore) + { + using_ignore=0; + table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + } if (row->query && row->log_query) { mysql_update_log.write(&thd,row->query, row->query_length); @@ -1192,6 +1208,9 @@ select_insert::prepare(List<Item> &values) thd->cuted_fields=0; if (info.handle_duplicates != DUP_REPLACE) table->file->extra(HA_EXTRA_WRITE_CACHE); + if (info.handle_duplicates == DUP_IGNORE || + info.handle_duplicates == DUP_REPLACE) + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->deactivate_non_unique_index((ha_rows) 0); DBUG_RETURN(0); } @@ -1203,6 +1222,7 @@ select_insert::~select_insert() if (save_time_stamp) table->time_stamp=save_time_stamp; table->next_number_field=0; + table->file->extra(HA_EXTRA_RESET); } thd->count_cuted_fields=0; } @@ -1245,6 +1265,7 @@ bool select_insert::send_eof() int error,error2; if (!(error=table->file->extra(HA_EXTRA_NO_CACHE))) error=table->file->activate_all_index(thd); + table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); if ((error2=ha_autocommit_or_rollback(thd,error)) && ! 
error) error=error2; @@ -1306,6 +1327,9 @@ select_create::prepare(List<Item> &values) restore_record(table,2); // Get empty record thd->count_cuted_fields=1; // count warnings thd->cuted_fields=0; + if (info.handle_duplicates == DUP_IGNORE || + info.handle_duplicates == DUP_REPLACE) + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); DBUG_RETURN(0); } @@ -1338,6 +1362,7 @@ bool select_create::send_eof() abort(); else { + table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); VOID(pthread_mutex_lock(&LOCK_open)); mysql_unlock_tables(thd, lock); if (!table->tmp_table) @@ -1358,6 +1383,7 @@ void select_create::abort() } if (table) { + table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); enum db_type table_type=table->db_type; if (!table->tmp_table) hash_delete(&open_cache,(byte*) table); diff --git a/sql/sql_load.cc b/sql/sql_load.cc index b73f6893225..17f94e88b9b 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -214,6 +214,9 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, table->time_stamp=0; table->next_number_field=table->found_next_number_field; VOID(table->file->extra(HA_EXTRA_WRITE_CACHE)); + if (handle_duplicates == DUP_IGNORE || + handle_duplicates == DUP_REPLACE) + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->deactivate_non_unique_index((ha_rows) 0); table->copy_blobs=1; if (!field_term->length() && !enclosed->length()) @@ -223,7 +226,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, if (table->file->extra(HA_EXTRA_NO_CACHE) || table->file->activate_all_index(thd)) error=1; /* purecov: inspected */ - + table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); table->time_stamp=save_time_stamp; table->next_number_field=0; if (thd->lock) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index fef35ed88d0..2866c6c3f26 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -73,14 +73,15 @@ static void init_signals(void) static inline bool end_active_trans(THD *thd) { + int error=0; if (thd->options & (OPTION_NOT_AUTO_COMMIT | 
OPTION_BEGIN)) { thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); thd->server_status&= ~SERVER_STATUS_IN_TRANS; if (ha_commit(thd)) - return 1; + error=1; } - return 0; + return error; } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 3094ff26d89..4424a0a6750 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -848,13 +848,12 @@ store_create_info(THD *thd, TABLE *table, String *packet) packet->append(" CHECKSUM=1", 11); if (table->db_create_options & HA_OPTION_DELAY_KEY_WRITE) packet->append(" DELAY_KEY_WRITE=1",18); - if(table->comment) - { - packet->append(" COMMENT='", 10); - append_unescaped(packet, table->comment); - packet->append('\''); - } - + if(table->comment && table->comment[0]) + { + packet->append(" COMMENT='", 10); + append_unescaped(packet, table->comment); + packet->append('\''); + } DBUG_RETURN(0); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 778a60859b2..332537cad4e 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1658,7 +1658,8 @@ copy_data_between_tables(TABLE *from,TABLE *to, }; init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1); - + if (handle_duplicates == DUP_IGNORE) + to->file->extra(HA_EXTRA_IGNORE_DUP_KEY); next_field=to->next_number_field; while (!(error=info.read_record(&info))) { @@ -1694,6 +1695,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, to->file->print_error(tmp_error,MYF(0)); error=1; } + to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); if (to->file->activate_all_index(thd)) error=1; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index be5229b2ea4..b7b9b23d79d 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -210,6 +210,8 @@ int mysql_update(THD *thd,TABLE_LIST *table_list,List<Item> &fields, if (!(test_flags & TEST_READCHECK)) /* For debugging */ VOID(table->file->extra(HA_EXTRA_NO_READCHECK)); + if (handle_duplicates == DUP_IGNORE) + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); init_read_record(&info,thd,table,select,0,1); ha_rows updated=0L,found=0L; @@ 
-250,6 +252,7 @@ int mysql_update(THD *thd,TABLE_LIST *table_list,List<Item> &fields, end_read_record(&info); thd->proc_info="end"; VOID(table->file->extra(HA_EXTRA_READCHECK)); + VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY)); table->time_stamp=save_time_stamp; // Restore auto timestamp pointer using_transactions=table->file->has_transactions(); if (updated && (error <= 0 || !using_transactions)) |