author     unknown <serg@sergbook.mysql.com>   2007-04-16 10:37:50 +0200
committer  unknown <serg@sergbook.mysql.com>   2007-04-16 10:37:50 +0200
commit     7cb8a33b1af33dcd8806ec6021655a39f375d904 (patch)
tree       df4c92bcf5668ec88685604378ad6dc60bfcf269 /storage
parent     2e73a53e033741b09a652646d9cfa0e76c33e6a1 (diff)
parent     add378761542ade65340b9477ed298e9a1677b10 (diff)
download   mariadb-git-7cb8a33b1af33dcd8806ec6021655a39f375d904.tar.gz
Merge bk-internal.mysql.com:/home/bk/mysql-5.1
into sergbook.mysql.com:/usr/home/serg/Abk/mysql-5.1-wl2936
client/mysql.cc:
Auto merged
include/my_global.h:
Auto merged
include/my_sys.h:
Auto merged
include/mysql.h:
Auto merged
mysql-test/r/im_utils.result:
Auto merged
mysql-test/r/variables.result:
Auto merged
mysql-test/t/ndb_dd_basic.test:
Auto merged
mysql-test/t/partition_innodb.test:
Auto merged
mysql-test/t/variables.test:
Auto merged
mysys/array.c:
Auto merged
mysys/typelib.c:
Auto merged
sql/event_queue.cc:
Auto merged
sql/ha_partition.cc:
Auto merged
sql/ha_partition.h:
Auto merged
sql/handler.cc:
Auto merged
sql/handler.h:
Auto merged
sql/item_func.cc:
Auto merged
sql/item_sum.cc:
Auto merged
sql/log.cc:
Auto merged
sql/mysql_priv.h:
Auto merged
sql/set_var.h:
Auto merged
sql/sql_base.cc:
Auto merged
sql/sql_cache.cc:
Auto merged
sql/sql_class.h:
Auto merged
sql/sql_delete.cc:
Auto merged
sql/sql_insert.cc:
Auto merged
sql/sql_lex.cc:
Auto merged
sql/sql_lex.h:
Auto merged
sql/sql_parse.cc:
Auto merged
sql/sql_partition.cc:
Auto merged
sql/sql_plugin.cc:
Auto merged
sql/sql_repl.cc:
Auto merged
sql/sql_select.cc:
Auto merged
sql/sql_show.cc:
Auto merged
sql/sql_table.cc:
Auto merged
sql/table.cc:
Auto merged
sql/table.h:
Auto merged
storage/example/ha_example.cc:
Auto merged
storage/federated/ha_federated.cc:
Auto merged
storage/heap/ha_heap.cc:
Auto merged
storage/innobase/include/trx0trx.h:
Auto merged
storage/myisam/ha_myisam.cc:
Auto merged
storage/myisammrg/ha_myisammrg.cc:
Auto merged
storage/ndb/src/mgmsrv/InitConfigFileParser.cpp:
Auto merged
include/typelib.h:
merged
mysql-test/mysql-test-run.pl:
merged
mysql-test/r/flush2.result:
merged
mysql-test/r/ndb_dd_basic.result:
merged
mysql-test/r/partition_innodb.result:
merged
mysql-test/r/ps_1general.result:
merged
mysql-test/t/ps_1general.test:
merged
sql/ha_ndbcluster.cc:
merged
sql/item_create.cc:
merged
sql/mysqld.cc:
merged
sql/rpl_utility.h:
merged
sql/set_var.cc:
merged
sql/sql_class.cc:
merged
sql/sql_yacc.yy:
merged
storage/innobase/handler/ha_innodb.cc:
merged
storage/innobase/handler/ha_innodb.h:
merged
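The wl2936 branch merged here (judging from the ha_innodb.cc hunks further down) replaces direct access to THD internals with the accessor functions exported to storage engines: thd_test_options(), thd_sql_command(), thd_tx_isolation(), thd_ha_data() and handler::ha_thd(). A minimal sketch of that pattern for a hypothetical engine follows; example_hton, example_ctx, thd_to_ctx(), in_explicit_trx() and is_plain_select() are illustrative names, not from the patch (the patch itself uses innodb_hton_ptr and thd_to_trx()).

#include "mysql_priv.h"
#include <mysql/plugin.h>

static handlerton *example_hton;      /* saved by the engine's init function */

struct example_ctx { ulonglong rows_touched; };

/* Per-connection engine data kept in the slot the server reserves for each
   handlerton; thd_ha_data() replaces poking at THD members directly
   (mirrors thd_to_trx() in the patch below). */
static example_ctx*& thd_to_ctx(THD *thd)
{
  return *(example_ctx**) thd_ha_data(thd, example_hton);
}

static bool in_explicit_trx(THD *thd)
{
  /* was: thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) */
  return thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) != 0;
}

static bool is_plain_select(THD *thd)
{
  /* was: thd->lex->sql_command == SQLCOM_SELECT */
  return thd_sql_command(thd) == SQLCOM_SELECT;
}

Inside handler methods the same patch also swaps current_thd for handler::ha_thd(), so the handler always acts on the session that actually owns it.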
Diffstat (limited to 'storage')
-rw-r--r--  storage/example/ha_example.cc                    |   5
-rw-r--r--  storage/federated/ha_federated.cc                |  11
-rw-r--r--  storage/heap/ha_heap.cc                          |  33
-rw-r--r--  storage/innobase/handler/ha_innodb.cc            | 708
-rw-r--r--  storage/innobase/handler/ha_innodb.h             |  12
-rw-r--r--  storage/innobase/include/trx0trx.h               |   8
-rw-r--r--  storage/myisam/ha_myisam.cc                      |  40
-rw-r--r--  storage/myisammrg/ha_myisammrg.cc                |  37
-rw-r--r--  storage/ndb/src/mgmsrv/InitConfigFileParser.cpp  |  27
9 files changed, 504 insertions, 377 deletions
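The ha_federated, ha_heap, ha_myisam and ha_myisammrg hunks below all make the same mechanical change: per-session handler counters are bumped through handler::ha_statistic_increment() instead of calling statistic_increment() on table->in_use->status_var under LOCK_status. A sketch of the before/after, as a fragment of a hypothetical handler rather than a complete class; ha_sketch and engine_write() are stand-ins for a real handler class and its engine-specific write call.

#include "mysql_priv.h"

int ha_sketch::write_row(byte *buf)
{
  /* old form, reaching into the THD attached to the TABLE object:
       statistic_increment(table->in_use->status_var.ha_write_count,
                           &LOCK_status);
     new form: the handler base class resolves the owning session itself */
  ha_statistic_increment(&SSV::ha_write_count);

  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
    table->timestamp_field->set_time();       /* unchanged by the patch */

  return engine_write(buf);                   /* hypothetical engine call */
}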
diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc index a4cdcafc6d0..53af99dee97 100644 --- a/storage/example/ha_example.cc +++ b/storage/example/ha_example.cc @@ -723,6 +723,11 @@ int ha_example::external_lock(THD *thd, int lock_type) Called from lock.cc by get_lock_data(). + @note + In this method one should NEVER rely on table->in_use, it may, in fact, + refer to a different thread! (this happens if get_lock_data() is called + from mysql_lock_abort_for_thread() function) + @see get_lock_data() in lock.cc */ diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index aa7184268f5..da8ad46f96d 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -1832,7 +1832,7 @@ int ha_federated::write_row(byte *buf) values_string.length(0); insert_string.length(0); insert_field_value_string.length(0); - statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); @@ -2299,8 +2299,7 @@ int ha_federated::index_read_idx_with_result_set(byte *buf, uint index, *result= 0; // In case of errors index_string.length(0); sql_query.length(0); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); sql_query.append(share->select_query); @@ -2414,8 +2413,7 @@ int ha_federated::read_range_next() int ha_federated::index_next(byte *buf) { DBUG_ENTER("ha_federated::index_next"); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); DBUG_RETURN(read_next(buf, stored_result)); } @@ -2616,8 +2614,7 @@ int ha_federated::rnd_pos(byte *buf, byte *pos) { int result; DBUG_ENTER("ha_federated::rnd_pos"); - statistic_increment(table->in_use->status_var.ha_read_rnd_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_count); if (table->s->primary_key != MAX_KEY) { /* We have a primary key, so use index_read_idx to find row */ diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index 8c378f7334f..096b0f63b5e 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -181,7 +181,7 @@ void ha_heap::update_key_stats() int ha_heap::write_row(byte * buf) { int res; - statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) @@ -205,7 +205,7 @@ int ha_heap::write_row(byte * buf) int ha_heap::update_row(const byte * old_data, byte * new_data) { int res; - statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_update_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); res= heap_update(file,old_data,new_data); @@ -224,7 +224,7 @@ int ha_heap::update_row(const byte * old_data, byte * new_data) int ha_heap::delete_row(const byte * buf) { int res; - statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_delete_count); res= heap_delete(file,buf); if (!res && table->s->tmp_table == NO_TMP_TABLE && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records) @@ -242,8 +242,7 @@ int ha_heap::index_read(byte * buf, 
const byte * key, key_part_map keypart_map, enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); int error = heap_rkey(file,buf,active_index, key, keypart_map, find_flag); table->status = error ? STATUS_NOT_FOUND : 0; return error; @@ -252,8 +251,7 @@ int ha_heap::index_read(byte * buf, const byte * key, key_part_map keypart_map, int ha_heap::index_read_last(byte *buf, const byte *key, key_part_map keypart_map) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); int error= heap_rkey(file, buf, active_index, key, keypart_map, HA_READ_PREFIX_LAST); table->status= error ? STATUS_NOT_FOUND : 0; @@ -264,8 +262,7 @@ int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, key_part_map keypart_map, enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); int error = heap_rkey(file, buf, index, key, keypart_map, find_flag); table->status = error ? STATUS_NOT_FOUND : 0; return error; @@ -274,8 +271,7 @@ int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, int ha_heap::index_next(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=heap_rnext(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -284,8 +280,7 @@ int ha_heap::index_next(byte * buf) int ha_heap::index_prev(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_prev_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_prev_count); int error=heap_rprev(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -294,8 +289,7 @@ int ha_heap::index_prev(byte * buf) int ha_heap::index_first(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_first_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_first_count); int error=heap_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -304,8 +298,7 @@ int ha_heap::index_first(byte * buf) int ha_heap::index_last(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_last_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_last_count); int error=heap_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -318,8 +311,7 @@ int ha_heap::rnd_init(bool scan) int ha_heap::rnd_next(byte *buf) { - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); int error=heap_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -329,8 +321,7 @@ int ha_heap::rnd_pos(byte * buf, byte *pos) { int error; HEAP_PTR heap_position; - statistic_increment(table->in_use->status_var.ha_read_rnd_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_count); memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR)); error=heap_rrnd(file, buf, heap_position); table->status=error ? 
STATUS_NOT_FOUND: 0; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 1932f775a3d..e7bf0f94195 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -54,7 +54,7 @@ bool innodb_inited= 0; This needs to exist until the query cache callback is removed or learns to pass hton. */ -static handlerton *legacy_innodb_hton; +static handlerton *innodb_hton_ptr; /* Store MySQL definition of 'byte': in Linux it is char while InnoDB uses unsigned char; the header univ.i which we include next defines @@ -92,7 +92,6 @@ extern "C" { #include "../storage/innobase/include/ha_prototypes.h" } -ulong innobase_large_page_size = 0; /* The default values for the following, type long or longlong, start-up parameters are declared in mysqld.cc: */ @@ -119,19 +118,29 @@ char* innobase_unix_file_flush_method = NULL; /* Below we have boolean-valued start-up parameters, and their default values */ +static ulong innobase_fast_shutdown = 1; +#ifdef UNIV_LOG_ARCHIVE +static my_bool innobase_log_archive = FALSE;/* unused */ +#endif /* UNIG_LOG_ARCHIVE */ +static my_bool innobase_use_doublewrite = TRUE; +static my_bool innobase_use_checksums = TRUE; -my_bool innobase_use_large_pages = FALSE; -my_bool innobase_use_native_aio = FALSE; +static my_bool innobase_file_per_table = FALSE; +static my_bool innobase_locks_unsafe_for_binlog = FALSE; +static my_bool innobase_rollback_on_timeout = FALSE; +static my_bool innobase_create_status_file = FALSE; +static my_bool innobase_stats_on_metadata = TRUE; -static char *internal_innobase_data_file_path = NULL; +static +char* internal_innobase_data_file_path = NULL; /* The following counter is used to convey information to InnoDB about server activity: in selects it is not sensible to call @@ -165,6 +174,17 @@ static handler *innobase_create_handler(handlerton *hton, static const char innobase_hton_name[]= "InnoDB"; + +static MYSQL_THDVAR_BOOL(support_xa, PLUGIN_VAR_OPCMDARG, + "Enable InnoDB support for the XA two-phase commit", + /* check_func */ NULL, /* update_func */ NULL, + /* default */ TRUE); + +static MYSQL_THDVAR_BOOL(table_locks, PLUGIN_VAR_OPCMDARG, + "Enable InnoDB locking in LOCK TABLES", + /* check_func */ NULL, /* update_func */ NULL, + /* default */ TRUE); + static handler *innobase_create_handler(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root) @@ -469,10 +489,9 @@ trx_t*& thd_to_trx( /*=======*/ /* out: reference to transaction pointer */ - THD* thd, /* in: MySQL thread */ - handlerton* hton) /* in: InnoDB handlerton */ + THD* thd) /* in: MySQL thread */ { - return(*(trx_t**) thd_ha_data(thd, hton)); + return(*(trx_t**) thd_ha_data(thd, innodb_hton_ptr)); } /************************************************************************ @@ -493,7 +512,7 @@ innobase_release_temporary_latches( return 0; } - trx = thd_to_trx(thd, hton); + trx = thd_to_trx(thd); if (trx) { innobase_release_stat_resources(trx); @@ -673,78 +692,12 @@ innobase_mysql_print_thd( uint max_query_len) /* in: max query length to print, or 0 to use the default max length */ { - const THD* thd; - const Security_context *sctx; - const char* s; - - thd = (const THD*) input_thd; - /* We probably want to have original user as part of debug output. 
*/ - sctx = &thd->main_security_ctx; - - - fprintf(f, "MySQL thread id %lu, query id %lu", - thd->thread_id, (ulong) thd->query_id); - if (sctx->host) { - putc(' ', f); - fputs(sctx->host, f); - } - - if (sctx->ip) { - putc(' ', f); - fputs(sctx->ip, f); - } - - if (sctx->user) { - putc(' ', f); - fputs(sctx->user, f); - } - - if ((s = thd->proc_info)) { - putc(' ', f); - fputs(s, f); - } - - if ((s = thd->query)) { - /* 3100 is chosen because currently 3000 is the maximum - max_query_len we ever give this. */ - char buf[3100]; - uint len; - - /* If buf is too small, we dynamically allocate storage - in this. */ - char* dyn_str = NULL; - - /* Points to buf or dyn_str. */ - char* str = buf; - - if (max_query_len == 0) { - /* ADDITIONAL SAFETY: the default is to print at - most 300 chars to reduce the probability of a - seg fault if there is a race in - thd->query_length in MySQL; after May 14, 2004 - probably no race any more, but better be - safe */ - max_query_len = 300; - } - - len = min(thd->query_length, max_query_len); - - if (len > (sizeof(buf) - 1)) { - dyn_str = my_malloc(len + 1, MYF(0)); - str = dyn_str; - } - - /* Use strmake to reduce the timeframe for a race, - compared to fwrite() */ - len = (uint) (strmake(str, s, len) - str); - putc('\n', f); - fwrite(str, 1, len, f); - - if (dyn_str) { - my_free(dyn_str, MYF(0)); - } - } + THD* thd; + char buffer[1024]; + thd = (THD*) input_thd; + fputs(thd_security_context(thd, buffer, sizeof(buffer), + max_query_len), f); putc('\n', f); } @@ -953,7 +906,7 @@ check_trx_exists( handlerton* hton, /* in: handlerton for innodb */ THD* thd) /* in: user thread handle */ { - trx_t*& trx = thd_to_trx(thd, hton); + trx_t*& trx = thd_to_trx(thd); ut_ad(thd == current_thd); @@ -967,7 +920,9 @@ check_trx_exists( /* Update the info whether we should skip XA steps that eat CPU time */ - trx->support_xa = (ibool)(thd->variables.innodb_support_xa); + trx->support_xa = THDVAR(thd, support_xa); + + thd_to_trx(thd) = trx; } else { if (trx->magic_n != TRX_MAGIC_N) { mem_analyze_corruption(trx); @@ -976,13 +931,13 @@ check_trx_exists( } } - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { trx->check_foreigns = FALSE; } else { trx->check_foreigns = TRUE; } - if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) { + if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) { trx->check_unique_secondary = FALSE; } else { trx->check_unique_secondary = TRUE; @@ -1069,7 +1024,7 @@ innobase_register_trx_and_stmt( innobase_register_stmt(hton, thd); - if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* No autocommit mode, register for a transaction */ trans_register_ha(thd, TRUE, hton); @@ -1161,14 +1116,15 @@ innobase_query_caching_of_table_permitted( ut_a(full_name_len < 999); - if (thd->variables.tx_isolation == ISO_SERIALIZABLE) { + trx = check_trx_exists(innodb_hton_ptr, thd); + + if (trx->isolation_level == TRX_ISO_SERIALIZABLE) { /* In the SERIALIZABLE mode we add LOCK IN SHARE MODE to every plain SELECT if AUTOCOMMIT is not on. 
*/ return((my_bool)FALSE); } - trx = check_trx_exists(legacy_innodb_hton, thd); if (trx->has_search_latch) { ut_print_timestamp(stderr); sql_print_error("The calling thread is holding the adaptive " @@ -1182,7 +1138,7 @@ innobase_query_caching_of_table_permitted( innobase_release_stat_resources(trx); - if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + if (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { is_autocommit = TRUE; } else { @@ -1227,7 +1183,7 @@ innobase_query_caching_of_table_permitted( if (trx->active_trans == 0) { - innobase_register_trx_and_stmt(legacy_innodb_hton, thd); + innobase_register_trx_and_stmt(innodb_hton_ptr, thd); trx->active_trans = 1; } @@ -1367,7 +1323,7 @@ ha_innobase::init_table_handle_for_HANDLER(void) one. Update the trx pointers in the prebuilt struct. Normally this operation is done in external_lock. */ - update_thd(current_thd); + update_thd(ha_thd()); /* Initialize the prebuilt struct much like it would be inited in external_lock */ @@ -1386,7 +1342,7 @@ ha_innobase::init_table_handle_for_HANDLER(void) if (prebuilt->trx->active_trans == 0) { - innobase_register_trx_and_stmt(ht, current_thd); + innobase_register_trx_and_stmt(ht, ha_thd()); prebuilt->trx->active_trans = 1; } @@ -1430,9 +1386,9 @@ innobase_init(void *p) DBUG_ENTER("innobase_init"); handlerton *innobase_hton= (handlerton *)p; - legacy_innodb_hton= innobase_hton; + innodb_hton_ptr= innobase_hton; - innobase_hton->state=have_innodb; + innobase_hton->state= SHOW_OPTION_YES; innobase_hton->db_type= DB_TYPE_INNODB; innobase_hton->savepoint_offset=sizeof(trx_named_savept_t); innobase_hton->close_connection=innobase_close_connection; @@ -1457,9 +1413,6 @@ innobase_init(void *p) innobase_hton->flags=HTON_NO_FLAGS; innobase_hton->release_temporary_latches=innobase_release_temporary_latches; - if (have_innodb != SHOW_OPTION_YES) - DBUG_RETURN(0); // nothing else to do - ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR); #ifdef UNIV_DEBUG @@ -1641,8 +1594,10 @@ innobase_init(void *p) srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite; srv_use_checksums = (ibool) innobase_use_checksums; - os_use_large_pages = (ibool) innobase_use_large_pages; - os_large_page_size = (ulint) innobase_large_page_size; +#ifdef HAVE_LARGE_PAGES + if ((os_use_large_pages = (ibool) my_use_large_pages)) + os_large_page_size = (ulint) opt_large_page_size; +#endif row_rollback_on_timeout = (ibool) innobase_rollback_on_timeout; @@ -1701,7 +1656,6 @@ innobase_init(void *p) DBUG_RETURN(FALSE); error: - have_innodb= SHOW_OPTION_DISABLED; // If we couldn't use handler DBUG_RETURN(TRUE); } @@ -1844,7 +1798,7 @@ innobase_commit( trx = check_trx_exists(hton, thd); /* Update the info whether we should skip XA steps that eat CPU time */ - trx->support_xa = (ibool)(thd->variables.innodb_support_xa); + trx->support_xa = THDVAR(thd, support_xa); /* Since we will reserve the kernel mutex, we have to release the search system latch first to obey the latching order. 
*/ @@ -1875,7 +1829,7 @@ innobase_commit( " trx->conc_state != TRX_NOT_STARTED"); } if (all - || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { + || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { /* We were instructed to commit the whole transaction, or this is an SQL statement end and autocommit is on */ @@ -2037,7 +1991,7 @@ innobase_commit_complete( { trx_t* trx; - trx = thd_to_trx(thd, hton); + trx = thd_to_trx(thd); if (trx && trx->active_trans) { @@ -2077,7 +2031,7 @@ innobase_rollback( trx = check_trx_exists(hton, thd); /* Update the info whether we should skip XA steps that eat CPU time */ - trx->support_xa = (ibool)(thd->variables.innodb_support_xa); + trx->support_xa = THDVAR(thd, support_xa); /* Release a possible FIFO ticket and search latch. Since we will reserve the kernel mutex, we have to release the search system latch @@ -2094,7 +2048,7 @@ innobase_rollback( } if (all - || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { + || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { error = trx_rollback_for_mysql(trx); trx->active_trans = 0; @@ -2226,7 +2180,7 @@ innobase_savepoint( (unless we are in sub-statement), so SQL layer ensures that this method is never called in such situation. */ - DBUG_ASSERT(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) || + DBUG_ASSERT(thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) || thd->in_sub_stmt); trx = check_trx_exists(hton, thd); @@ -2262,7 +2216,7 @@ innobase_close_connection( { trx_t* trx; - trx = thd_to_trx(thd, hton); + trx = thd_to_trx(thd); ut_a(trx); @@ -2397,7 +2351,7 @@ ha_innobase::open( UT_NOT_USED(mode); UT_NOT_USED(test_if_locked); - thd = current_thd; + thd = ha_thd(); normalize_table_name(norm_name, name); user_thd = NULL; @@ -3349,8 +3303,8 @@ ha_innobase::write_row( longlong auto_inc; longlong dummy; ibool auto_inc_used= FALSE; - THD* thd = current_thd; - trx_t* trx = thd_to_trx(thd, ht); + THD *thd= ha_thd(); + trx_t* trx = thd_to_trx(thd); DBUG_ENTER("ha_innobase::write_row"); @@ -3374,10 +3328,10 @@ ha_innobase::write_row( if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); - if ((user_thd->lex->sql_command == SQLCOM_ALTER_TABLE - || user_thd->lex->sql_command == SQLCOM_OPTIMIZE - || user_thd->lex->sql_command == SQLCOM_CREATE_INDEX - || user_thd->lex->sql_command == SQLCOM_DROP_INDEX) + if ((thd_sql_command(thd) == SQLCOM_ALTER_TABLE + || thd_sql_command(thd) == SQLCOM_OPTIMIZE + || thd_sql_command(thd) == SQLCOM_CREATE_INDEX + || thd_sql_command(thd) == SQLCOM_DROP_INDEX) && num_write_row >= 10000) { /* ALTER TABLE is COMMITted at every 10000 copied rows. The IX table lock for the original table has to be re-issued. @@ -3529,10 +3483,11 @@ no_commit: performing those statements. 
*/ if (error == DB_DUPLICATE_KEY && auto_inc_used - && (user_thd->lex->sql_command == SQLCOM_REPLACE - || user_thd->lex->sql_command == SQLCOM_REPLACE_SELECT - || (user_thd->lex->sql_command == SQLCOM_LOAD - && user_thd->lex->duplicates == DUP_REPLACE))) { + && (thd_sql_command(thd) == SQLCOM_REPLACE + || thd_sql_command(thd) == SQLCOM_REPLACE_SELECT + || (thd_sql_command(thd) == SQLCOM_LOAD + && prebuilt->trx->allow_duplicates + && prebuilt->trx->replace_duplicates))) { auto_inc = table->next_number_field->val_int(); @@ -3720,7 +3675,7 @@ ha_innobase::update_row( { upd_t* uvect; int error = 0; - trx_t* trx = thd_to_trx(current_thd, ht); + trx_t* trx = thd_to_trx(ha_thd()); DBUG_ENTER("ha_innobase::update_row"); @@ -3773,7 +3728,7 @@ ha_innobase::delete_row( const mysql_byte* record) /* in: a row in MySQL format */ { int error = 0; - trx_t* trx = thd_to_trx(current_thd, ht); + trx_t* trx = thd_to_trx(ha_thd()); DBUG_ENTER("ha_innobase::delete_row"); @@ -3852,7 +3807,7 @@ void ha_innobase::try_semi_consistent_read(bool yes) /*===========================================*/ { - ut_a(prebuilt->trx == thd_to_trx(current_thd, ht)); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); /* Row read type is set to semi consistent read if this was requested by the MySQL and either innodb_locks_unsafe_for_binlog @@ -4017,7 +3972,7 @@ ha_innobase::index_read( DBUG_ENTER("index_read"); - ut_a(prebuilt->trx == thd_to_trx(current_thd, ht)); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); ha_statistic_increment(&SSV::ha_read_key_count); @@ -4117,11 +4072,11 @@ ha_innobase::change_active_index( InnoDB */ { KEY* key=0; - ha_statistic_increment(&SSV::ha_read_key_count); DBUG_ENTER("change_active_index"); + ha_statistic_increment(&SSV::ha_read_key_count); - ut_ad(user_thd == current_thd); - ut_a(prebuilt->trx == thd_to_trx(user_thd, ht)); + ut_ad(user_thd == ha_thd()); + ut_a(prebuilt->trx == thd_to_trx(user_thd)); active_index = keynr; @@ -4209,7 +4164,7 @@ ha_innobase::general_fetch( DBUG_ENTER("general_fetch"); - ut_a(prebuilt->trx == thd_to_trx(current_thd, ht)); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); innodb_srv_conc_enter_innodb(prebuilt->trx); @@ -4434,7 +4389,7 @@ ha_innobase::rnd_pos( ha_statistic_increment(&SSV::ha_read_rnd_count); - ut_a(prebuilt->trx == thd_to_trx(current_thd, ht)); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -4482,7 +4437,7 @@ ha_innobase::position( { uint len; - ut_a(prebuilt->trx == thd_to_trx(current_thd, ht)); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -4791,7 +4746,7 @@ ha_innobase::create( uint i; char name2[FN_REFLEN]; char norm_name[FN_REFLEN]; - THD *thd= current_thd; + THD *thd= ha_thd(); ib_longlong auto_inc_value; ulint flags; @@ -4821,11 +4776,11 @@ ha_innobase::create( trx->mysql_thd = thd; trx->mysql_query_str = &((*thd).query); - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { trx->check_foreigns = FALSE; } - if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) { + if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) { trx->check_unique_secondary = FALSE; } @@ -4981,7 +4936,7 @@ ha_innobase::discard_or_import_tablespace( ut_a(prebuilt->trx); ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N); - ut_a(prebuilt->trx == thd_to_trx(current_thd, ht)); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); dict_table = 
prebuilt->table; trx = prebuilt->trx; @@ -5006,33 +4961,33 @@ ha_innobase::delete_all_rows(void) /* out: error number */ { int error; - THD* thd = current_thd; + THD* thd = ha_thd(); DBUG_ENTER("ha_innobase::delete_all_rows"); - if (thd->lex->sql_command != SQLCOM_TRUNCATE) { - fallback: - /* We only handle TRUNCATE TABLE t as a special case. - DELETE FROM t will have to use ha_innobase::delete_row(). */ - DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND); - } - /* Get the transaction associated with the current thd, or create one if not yet created, and update prebuilt->trx */ update_thd(thd); - /* Truncate the table in InnoDB */ + if (thd_sql_command(thd) == SQLCOM_TRUNCATE) { + /* Truncate the table in InnoDB */ - error = row_truncate_table_for_mysql(prebuilt->table, prebuilt->trx); - if (error == DB_ERROR) { - /* Cannot truncate; resort to ha_innobase::delete_row() */ - goto fallback; - } + error = row_truncate_table_for_mysql(prebuilt->table, prebuilt->trx); + if (error == DB_ERROR) { + /* Cannot truncate; resort to ha_innobase::delete_row() */ + goto fallback; + } - error = convert_error_code_to_mysql(error, NULL); + error = convert_error_code_to_mysql(error, NULL); - DBUG_RETURN(error); + DBUG_RETURN(error); + } + +fallback: + /* We only handle TRUNCATE TABLE t as a special case. + DELETE FROM t will have to use ha_innobase::delete_row(). */ + DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND); } /********************************************************************* @@ -5052,7 +5007,7 @@ ha_innobase::delete_table( int error; trx_t* parent_trx; trx_t* trx; - THD *thd= current_thd; + THD *thd= ha_thd(); char norm_name[1000]; DBUG_ENTER("ha_innobase::delete_table"); @@ -5075,75 +5030,75 @@ ha_innobase::delete_table( trx = trx_allocate_for_mysql(); - trx->mysql_thd = current_thd; - trx->mysql_query_str = &((*current_thd).query); + trx->mysql_thd = ha_thd(); + trx->mysql_query_str = &(ha_thd()->query); - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { - trx->check_foreigns = FALSE; - } + if (thd_test_options(thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { + trx->check_foreigns = FALSE; + } - if (thd->options & OPTION_RELAXED_UNIQUE_CHECKS) { - trx->check_unique_secondary = FALSE; - } + if (thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS)) { + trx->check_unique_secondary = FALSE; + } - name_len = strlen(name); + name_len = strlen(name); - assert(name_len < 1000); + assert(name_len < 1000); - /* Strangely, MySQL passes the table name without the '.frm' - extension, in contrast to ::create */ + /* Strangely, MySQL passes the table name without the '.frm' + extension, in contrast to ::create */ - normalize_table_name(norm_name, name); + normalize_table_name(norm_name, name); - /* Drop the table in InnoDB */ + /* Drop the table in InnoDB */ - error = row_drop_table_for_mysql(norm_name, trx, - thd->lex->sql_command == SQLCOM_DROP_DB); + error = row_drop_table_for_mysql(norm_name, trx, + thd_sql_command(thd) == SQLCOM_DROP_DB); - /* Flush the log to reduce probability that the .frm files and - the InnoDB data dictionary get out-of-sync if the user runs - with innodb_flush_log_at_trx_commit = 0 */ + /* Flush the log to reduce probability that the .frm files and + the InnoDB data dictionary get out-of-sync if the user runs + with innodb_flush_log_at_trx_commit = 0 */ - log_buffer_flush_to_disk(); + log_buffer_flush_to_disk(); - /* Tell the InnoDB server that there might be work for - utility threads: */ + /* Tell the InnoDB server that there might be work for + utility threads: */ - srv_active_wake_master_thread(); 
+ srv_active_wake_master_thread(); - innobase_commit_low(trx); + innobase_commit_low(trx); - trx_free_for_mysql(trx); + trx_free_for_mysql(trx); - error = convert_error_code_to_mysql(error, NULL); + error = convert_error_code_to_mysql(error, NULL); - DBUG_RETURN(error); + DBUG_RETURN(error); } /********************************************************************* -Removes all tables in the named database inside InnoDB. */ + Removes all tables in the named database inside InnoDB. */ static void innobase_drop_database( -/*===================*/ - /* out: error number */ - handlerton *hton, /* in: handlerton of Innodb */ - char* path) /* in: database path; inside InnoDB the name - of the last directory in the path is used as - the database name: for example, in 'mysql/data/test' - the database name is 'test' */ -{ - ulint len = 0; - trx_t* parent_trx; - trx_t* trx; - char* ptr; - int error; - char* namebuf; - - /* Get the transaction associated with the current thd, or create one - if not yet created */ - - parent_trx = check_trx_exists(hton, current_thd); + /*===================*/ + /* out: error number */ + handlerton *hton, /* in: handlerton of Innodb */ + char* path) /* in: database path; inside InnoDB the name + of the last directory in the path is used as + the database name: for example, in 'mysql/data/test' + the database name is 'test' */ +{ + ulint len = 0; + trx_t* parent_trx; + trx_t* trx; + char* ptr; + int error; + char* namebuf; + + /* Get the transaction associated with the current thd, or create one + if not yet created */ + + parent_trx = check_trx_exists(hton, ha_thd()); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5170,7 +5125,7 @@ innobase_drop_database( trx->mysql_thd = current_thd; trx->mysql_query_str = &((*current_thd).query); - if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + if (thd_test_options(current_thd, OPTION_NO_FOREIGN_KEY_CHECKS)) { trx->check_foreigns = FALSE; } @@ -5222,7 +5177,7 @@ ha_innobase::rename_table( /* Get the transaction associated with the current thd, or create one if not yet created */ - parent_trx = check_trx_exists(ht, current_thd); + parent_trx = check_trx_exists(ht, ha_thd()); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5236,10 +5191,10 @@ ha_innobase::rename_table( } trx = trx_allocate_for_mysql(); - trx->mysql_thd = current_thd; - trx->mysql_query_str = &((*current_thd).query); + trx->mysql_thd = ha_thd(); + trx->mysql_query_str = &((*ha_thd()).query); - if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) { + if (thd_test_options(ha_thd(), OPTION_NO_FOREIGN_KEY_CHECKS)) { trx->check_foreigns = FALSE; } @@ -5307,7 +5262,7 @@ ha_innobase::records_in_range( DBUG_ENTER("records_in_range"); - ut_a(prebuilt->trx == thd_to_trx(current_thd, ht)); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); prebuilt->trx->op_info = (char*)"estimating records in index range"; @@ -5391,7 +5346,7 @@ ha_innobase::estimate_rows_upper_bound(void) external_lock(). To be safe, update the thd of the current table handle. */ - update_thd(current_thd); + update_thd(ha_thd()); prebuilt->trx->op_info = (char*) "calculating upper bound for table rows"; @@ -5508,7 +5463,7 @@ ha_innobase::info( external_lock(). To be safe, update the thd of the current table handle. 
*/ - update_thd(current_thd); + update_thd(ha_thd()); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5739,7 +5694,7 @@ ha_innobase::check( ulint ret; ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); - ut_a(prebuilt->trx == thd_to_trx(current_thd, ht)); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); if (prebuilt->mysql_template == NULL) { /* Build the template; we will use a dummy template @@ -5781,7 +5736,7 @@ ha_innobase::update_table_comment( return((char*)comment); /* string too long */ } - update_thd(current_thd); + update_thd(ha_thd()); prebuilt->trx->op_info = (char*)"returning table comment"; @@ -5852,7 +5807,7 @@ ha_innobase::get_foreign_key_create_info(void) external_lock(). To be safe, update the thd of the current table handle. */ - update_thd(current_thd); + update_thd(ha_thd()); prebuilt->trx->op_info = (char*)"getting info on foreign keys"; @@ -5901,7 +5856,7 @@ ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) DBUG_ENTER("get_foreign_key_list"); ut_a(prebuilt != NULL); - update_thd(current_thd); + update_thd(ha_thd()); prebuilt->trx->op_info = (char*)"getting list of foreign keys"; trx_search_latch_release_if_reserved(prebuilt->trx); mutex_enter_noninline(&(dict_sys->mutex)); @@ -6036,7 +5991,7 @@ ha_innobase::can_switch_engines(void) DBUG_ENTER("ha_innobase::can_switch_engines"); - ut_a(prebuilt->trx == thd_to_trx(current_thd, ht)); + ut_a(prebuilt->trx == thd_to_trx(ha_thd())); prebuilt->trx->op_info = "determining if there are foreign key constraints"; @@ -6117,6 +6072,19 @@ ha_innobase::extra( case HA_EXTRA_KEYREAD_PRESERVE_FIELDS: prebuilt->keep_other_fields_on_keyread = 1; break; + case HA_EXTRA_IGNORE_DUP_KEY: + prebuilt->trx->allow_duplicates= TRUE; + break; + case HA_EXTRA_WRITE_CAN_REPLACE: + prebuilt->trx->replace_duplicates= TRUE; + break; + case HA_EXTRA_WRITE_CANNOT_REPLACE: + prebuilt->trx->replace_duplicates= FALSE; + break; + case HA_EXTRA_NO_IGNORE_DUP_KEY: + prebuilt->trx->allow_duplicates= FALSE; + prebuilt->trx->replace_duplicates= FALSE; + break; default:/* Do nothing */ ; } @@ -6183,7 +6151,7 @@ ha_innobase::start_stmt( prebuilt->select_lock_type = LOCK_X; } else { if (trx->isolation_level != TRX_ISO_SERIALIZABLE - && thd->lex->sql_command == SQLCOM_SELECT + && thd_sql_command(thd) == SQLCOM_SELECT && lock_type == TL_READ) { /* For other than temporary tables, we obtain @@ -6295,8 +6263,8 @@ ha_innobase::external_lock( if (trx->isolation_level == TRX_ISO_SERIALIZABLE && prebuilt->select_lock_type == LOCK_NONE - && (thd->options - & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + && thd_test_options(thd, + OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* To get serializable execution, we let InnoDB conceptually add 'LOCK IN SHARE MODE' to all SELECTs @@ -6323,10 +6291,10 @@ ha_innobase::external_lock( if (prebuilt->select_lock_type != LOCK_NONE) { - if (thd->lex->sql_command == SQLCOM_LOCK_TABLES - && thd->variables.innodb_table_locks - && (thd->options & OPTION_NOT_AUTOCOMMIT) - && thd_in_lock_tables(thd)) { + if (thd_in_lock_tables(thd) && + thd_sql_command(thd) == SQLCOM_LOCK_TABLES && + THDVAR(thd, table_locks) && + thd_test_options(thd, OPTION_NOT_AUTOCOMMIT)) { ulint error = row_lock_table_for_mysql( prebuilt, NULL, 0); @@ -6363,7 +6331,7 @@ ha_innobase::external_lock( innobase_release_stat_resources(trx); - if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { + if (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | 
OPTION_BEGIN)) { if (trx->active_trans != 0) { innobase_commit(ht, thd, TRUE); } @@ -6405,7 +6373,7 @@ ha_innobase::transactional_table_lock( update_thd(thd); if (prebuilt->table->ibd_file_missing - && !thd_tablespace_op(current_thd)) { + && !thd_tablespace_op(ha_thd())) { ut_print_timestamp(stderr); fprintf(stderr, " InnoDB error:\n" "MySQL is trying to use a table handle but the .ibd file for\n" @@ -6450,7 +6418,7 @@ ha_innobase::transactional_table_lock( trx->active_trans = 1; } - if (thd->variables.innodb_table_locks && thd_in_lock_tables(thd)) { + if (thd_in_lock_tables(thd) && THDVAR(thd, table_locks)) { ulint error = DB_SUCCESS; error = row_lock_table_for_mysql(prebuilt, NULL, 0); @@ -6460,7 +6428,7 @@ ha_innobase::transactional_table_lock( DBUG_RETURN((int) error); } - if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + if (thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { /* Store the current undo_no of the transaction so that we know where to roll back if we have @@ -6506,10 +6474,6 @@ innodb_show_status( DBUG_ENTER("innodb_show_status"); - if (have_innodb != SHOW_OPTION_YES) { - DBUG_RETURN(FALSE); - } - trx = check_trx_exists(hton, thd); innobase_release_stat_resources(trx); @@ -6800,8 +6764,7 @@ ha_innobase::store_lock( if (lock_type != TL_IGNORE && trx->n_mysql_tables_in_use == 0) { trx->isolation_level = innobase_map_isolation_level( - (enum_tx_isolation) - thd->variables.tx_isolation); + (enum_tx_isolation)thd_tx_isolation(thd)); if (trx->isolation_level <= TRX_ISO_READ_COMMITTED && trx->global_read_view) { @@ -6811,12 +6774,13 @@ ha_innobase::store_lock( read_view_close_for_mysql(trx); } - } + DBUG_ASSERT(thd == current_thd); const bool in_lock_tables = thd_in_lock_tables(thd); + const uint sql_command = thd_sql_command(thd); - if (thd->lex->sql_command == SQLCOM_DROP_TABLE) { + if (sql_command == SQLCOM_DROP_TABLE) { /* MySQL calls this function in DROP TABLE though this table handle may belong to another thd that is running a query. Let @@ -6826,7 +6790,7 @@ ha_innobase::store_lock( (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables) || lock_type == TL_READ_WITH_SHARED_LOCKS || lock_type == TL_READ_NO_INSERT || - (thd->lex->sql_command != SQLCOM_SELECT + (sql_command != SQLCOM_SELECT && lock_type != TL_IGNORE)) { /* The OR cases above are in this order: @@ -6855,9 +6819,9 @@ ha_innobase::store_lock( || isolation_level == TRX_ISO_READ_COMMITTED) && isolation_level != TRX_ISO_SERIALIZABLE && (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) - && (thd->lex->sql_command == SQLCOM_INSERT_SELECT - || thd->lex->sql_command == SQLCOM_UPDATE - || thd->lex->sql_command == SQLCOM_CREATE_TABLE)) { + && (sql_command == SQLCOM_INSERT_SELECT + || sql_command == SQLCOM_UPDATE + || sql_command == SQLCOM_CREATE_TABLE)) { /* If we either have innobase_locks_unsafe_for_binlog option set or this session is using READ COMMITTED @@ -6870,7 +6834,7 @@ ha_innobase::store_lock( prebuilt->select_lock_type = LOCK_NONE; prebuilt->stored_select_lock_type = LOCK_NONE; - } else if (thd->lex->sql_command == SQLCOM_CHECKSUM) { + } else if (sql_command == SQLCOM_CHECKSUM) { /* Use consistent read for checksum table */ prebuilt->select_lock_type = LOCK_NONE; @@ -6900,7 +6864,7 @@ ha_innobase::store_lock( (if it does not use a consistent read). */ if (lock_type == TL_READ - && thd->lex->sql_command == SQLCOM_LOCK_TABLES) { + && sql_command == SQLCOM_LOCK_TABLES) { /* We come here if MySQL is processing LOCK TABLES ... READ LOCAL. 
MyISAM under that table lock type reads the table as it was at the time the lock was @@ -6928,22 +6892,22 @@ ha_innobase::store_lock( if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) && !(in_lock_tables - && thd->lex->sql_command == SQLCOM_LOCK_TABLES) + && sql_command == SQLCOM_LOCK_TABLES) && !thd_tablespace_op(thd) - && thd->lex->sql_command != SQLCOM_TRUNCATE - && thd->lex->sql_command != SQLCOM_OPTIMIZE + && sql_command != SQLCOM_TRUNCATE + && sql_command != SQLCOM_OPTIMIZE #ifdef __WIN__ - /* For alter table on win32 for succesful operation - completion it is used TL_WRITE(=10) lock instead of - TL_WRITE_ALLOW_READ(=6), however here in innodb handler - TL_WRITE is lifted to TL_WRITE_ALLOW_WRITE, which causes - race condition when several clients do alter table - simultaneously (bug #17264). This fix avoids the problem. */ - && thd->lex->sql_command != SQLCOM_ALTER_TABLE + /* For alter table on win32 for succesful operation + completion it is used TL_WRITE(=10) lock instead of + TL_WRITE_ALLOW_READ(=6), however here in innodb handler + TL_WRITE is lifted to TL_WRITE_ALLOW_WRITE, which causes + race condition when several clients do alter table + simultaneously (bug #17264). This fix avoids the problem. */ + && sql_command != SQLCOM_ALTER_TABLE #endif - && thd->lex->sql_command != SQLCOM_CREATE_TABLE) { + && sql_command != SQLCOM_CREATE_TABLE) { lock_type = TL_WRITE_ALLOW_WRITE; } @@ -6956,10 +6920,10 @@ ha_innobase::store_lock( We especially allow concurrent inserts if MySQL is at the start of a stored procedure call (SQLCOM_CALL) - (MySQL does have in_lock_tables TRUE there). */ + (MySQL does have thd_in_lock_tables() TRUE there). */ if (lock_type == TL_READ_NO_INSERT - && thd->lex->sql_command != SQLCOM_LOCK_TABLES) { + && sql_command != SQLCOM_LOCK_TABLES) { lock_type = TL_READ; } @@ -6994,7 +6958,7 @@ ha_innobase::innobase_read_and_init_auto_inc( ut_a(prebuilt->table); /* Prepare prebuilt->trx in the table handle */ - update_thd(current_thd); + update_thd(ha_thd()); if (prebuilt->trx->conc_state == TRX_NOT_STARTED) { trx_was_not_started = TRUE; @@ -7137,7 +7101,7 @@ void ha_innobase::get_auto_increment( int error; /* Prepare prebuilt->trx in the table handle */ - update_thd(current_thd); + update_thd(ha_thd()); error = innobase_read_and_init_auto_inc(&nr); @@ -7166,7 +7130,7 @@ ha_innobase::reset_auto_increment(ulonglong value) int error; - update_thd(current_thd); + update_thd(ha_thd()); error = row_lock_table_autoinc_for_mysql(prebuilt); @@ -7185,7 +7149,7 @@ ha_innobase::reset_auto_increment(ulonglong value) bool ha_innobase::get_error_message(int error, String *buf) { - trx_t* trx = check_trx_exists(ht, current_thd); + trx_t* trx = check_trx_exists(ht, ha_thd()); buf->copy(trx->detailed_error, strlen(trx->detailed_error), system_charset_info); @@ -7401,6 +7365,7 @@ innobase_query_is_update(void) /*==========================*/ { THD* thd = current_thd; + trx_t* trx; if (!thd) { /* InnoDB's internal threads may run InnoDB stored procedures @@ -7410,17 +7375,9 @@ innobase_query_is_update(void) return(FALSE); } - switch (thd->lex->sql_command) { - case SQLCOM_REPLACE: - case SQLCOM_REPLACE_SELECT: - return(TRUE); - case SQLCOM_LOAD: - return(thd->lex->duplicates == DUP_REPLACE); - case SQLCOM_INSERT: - return(thd->lex->duplicates == DUP_UPDATE); - default: - return(FALSE); - } + trx = check_trx_exists(innodb_hton_ptr, thd); + + return(trx->allow_duplicates); } /*********************************************************************** @@ -7439,8 +7396,8 @@ 
innobase_xa_prepare( int error = 0; trx_t* trx = check_trx_exists(hton, thd); - if (thd->lex->sql_command != SQLCOM_XA_PREPARE && - (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) + if (thd_sql_command(thd) != SQLCOM_XA_PREPARE && + (all || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { /* For ibbackup to work the order of transactions in binlog @@ -7466,7 +7423,7 @@ innobase_xa_prepare( trx->active_trans = 2; } - if (!thd->variables.innodb_support_xa) { + if (!THDVAR(thd, support_xa)) { return(0); } @@ -7486,7 +7443,7 @@ innobase_xa_prepare( } if (all - || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { + || (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { /* We were instructed to prepare the whole transaction, or this is an SQL statement end and autocommit is on */ @@ -7678,6 +7635,223 @@ static SHOW_VAR innodb_status_variables_export[]= { static struct st_mysql_storage_engine innobase_storage_engine= { MYSQL_HANDLERTON_INTERFACE_VERSION }; +/* plugin options */ +static MYSQL_SYSVAR_BOOL(checksums, innobase_use_checksums, + PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, + "Enable InnoDB checksums validation (enabled by default). " + "Disable with --skip-innodb-checksums.", + NULL, NULL, TRUE); + +static MYSQL_SYSVAR_STR(data_home_dir, innobase_data_home_dir, + PLUGIN_VAR_READONLY, + "The common part for InnoDB table spaces.", + NULL, NULL, NULL); + +static MYSQL_SYSVAR_BOOL(doublewrite, innobase_use_doublewrite, + PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, + "Enable InnoDB doublewrite buffer (enabled by default). " + "Disable with --skip-innodb-doublewrite.", + NULL, NULL, TRUE); + +static MYSQL_SYSVAR_ULONG(fast_shutdown, innobase_fast_shutdown, + PLUGIN_VAR_OPCMDARG, + "Speeds up the shutdown process of the InnoDB storage engine. Possible " + "values are 0, 1 (faster)" + /* + NetWare can't close unclosed files, can't automatically kill remaining + threads, etc, so on this OS we disable the crash-like InnoDB shutdown. 
+ */ +#ifndef __NETWARE__ + " or 2 (fastest - crash-like)" +#endif + ".", + NULL, NULL, 1, 0, IF_NETWARE(1,2), 0); + +static MYSQL_SYSVAR_BOOL(file_per_table, innobase_file_per_table, + PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, + "Stores each InnoDB table to an .ibd file in the database dir.", + NULL, NULL, FALSE); + +static MYSQL_SYSVAR_ULONG(flush_log_at_trx_commit, srv_flush_log_at_trx_commit, + PLUGIN_VAR_OPCMDARG, + "Set to 0 (write and flush once per second), 1 (write and flush at each commit)\ + or 2 (write at commit, flush once per second).", + NULL, NULL, 1, 0, 2, 0); + +static MYSQL_SYSVAR_STR(flush_method, innobase_unix_file_flush_method, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "With which method to flush data.", NULL, NULL, NULL); + +static MYSQL_SYSVAR_BOOL(locks_unsafe_for_binlog, innobase_locks_unsafe_for_binlog, + PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY, + "Force InnoDB to not use next-key locking, to use only row-level locking.", + NULL, NULL, FALSE); + +static MYSQL_SYSVAR_STR(log_arch_dir, innobase_log_arch_dir, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Where full logs should be archived.", NULL, NULL, NULL); + +#ifdef UNIV_LOG_ARCHIVE +static MYSQL_SYSVAR_BOOL(log_archive, innobase_log_archive, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "Set to 1 if you want to have logs archived.", NULL, NULL, FALSE); +#endif /* UNIV_LOG_ARCHIVE */ + +static MYSQL_SYSVAR_STR(log_group_home_dir, innobase_log_group_home_dir, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Path to InnoDB log files.", NULL, NULL, NULL); + +static MYSQL_SYSVAR_ULONG(max_dirty_pages_pct, srv_max_buf_pool_modified_pct, + PLUGIN_VAR_RQCMDARG, + "Percentage of dirty pages allowed in bufferpool.", + NULL, NULL, 90, 0, 100, 0); + +static MYSQL_SYSVAR_ULONG(max_purge_lag, srv_max_purge_lag, + PLUGIN_VAR_RQCMDARG, + "Desired maximum length of the purge queue (0 = no limit)", + NULL, NULL, 0, 0, ~0L, 0); + +static MYSQL_SYSVAR_BOOL(rollback_on_timeout, innobase_rollback_on_timeout, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY, + "Roll back the complete transaction on lock wait timeout, for 4.x compatibility (disabled by default)", + NULL, NULL, FALSE); + +static MYSQL_SYSVAR_BOOL(status_file, innobase_create_status_file, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_NOSYSVAR, + "Enable SHOW INNODB STATUS output in the innodb_status.<pid> file", + NULL, NULL, FALSE); + +static MYSQL_SYSVAR_BOOL(stats_on_metadata, innobase_stats_on_metadata, + PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_NOSYSVAR, + "Enable statistics gathering for metadata commands such as SHOW TABLE STATUS (on by default)", + NULL, NULL, TRUE); + +static MYSQL_SYSVAR_LONG(additional_mem_pool_size, innobase_additional_mem_pool_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.", + NULL, NULL, 1*1024*1024L, 512*1024L, ~0L, 1024); + +static MYSQL_SYSVAR_ULONG(autoextend_increment, srv_auto_extend_increment, + PLUGIN_VAR_RQCMDARG, + "Data file autoextend increment in megabytes", + NULL, NULL, 8L, 1L, 1000L, 0); + +static MYSQL_SYSVAR_LONGLONG(buffer_pool_size, innobase_buffer_pool_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.", + NULL, NULL, 8*1024*1024L, 1024*1024L, LONGLONG_MAX, 1024*1024L); + +static MYSQL_SYSVAR_ULONG(commit_concurrency, srv_commit_concurrency, + PLUGIN_VAR_RQCMDARG, + "Helps in performance tuning in heavily concurrent environments.", + 
NULL, NULL, 0, 0, 1000, 0); + +static MYSQL_SYSVAR_ULONG(concurrency_tickets, srv_n_free_tickets_to_enter, + PLUGIN_VAR_RQCMDARG, + "Number of times a thread is allowed to enter InnoDB within the same SQL query after it has once got the ticket", + NULL, NULL, 500L, 1L, ~0L, 0); + +static MYSQL_SYSVAR_LONG(file_io_threads, innobase_file_io_threads, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Number of file I/O threads in InnoDB.", + NULL, NULL, 4, 4, 64, 0); + +static MYSQL_SYSVAR_LONG(force_recovery, innobase_force_recovery, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Helps to save your data in case the disk image of the database becomes corrupt.", + NULL, NULL, 0, 0, 6, 0); + +static MYSQL_SYSVAR_LONG(lock_wait_timeout, innobase_lock_wait_timeout, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back.", + NULL, NULL, 50, 1, 1024 * 1024 * 1024, 0); + +static MYSQL_SYSVAR_LONG(log_buffer_size, innobase_log_buffer_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "The size of the buffer which InnoDB uses to write log to the log files on disk.", + NULL, NULL, 1024*1024L, 256*1024L, ~0L, 1024); + +static MYSQL_SYSVAR_LONGLONG(log_file_size, innobase_log_file_size, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Size of each log file in a log group.", + NULL, NULL, 5*1024*1024L, 1*1024*1024L, LONGLONG_MAX, 1024*1024L); + +static MYSQL_SYSVAR_LONG(log_files_in_group, innobase_log_files_in_group, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.", + NULL, NULL, 2, 2, 100, 0); + +static MYSQL_SYSVAR_LONG(mirrored_log_groups, innobase_mirrored_log_groups, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Number of identical copies of log groups we keep for the database. Currently this should be set to 1.", + NULL, NULL, 1, 1, 10, 0); + +static MYSQL_SYSVAR_LONG(open_files, innobase_open_files, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "How many files at the maximum InnoDB keeps open at the same time.", + NULL, NULL, 300L, 10L, ~0L, 0); + +static MYSQL_SYSVAR_ULONG(sync_spin_loops, srv_n_spin_wait_rounds, + PLUGIN_VAR_RQCMDARG, + "Count of spin-loop rounds in InnoDB mutexes", + NULL, NULL, 20L, 0L, ~0L, 0); + +static MYSQL_SYSVAR_ULONG(thread_concurrency, srv_thread_concurrency, + PLUGIN_VAR_RQCMDARG, + "Helps in performance tuning in heavily concurrent environments. Sets the maximum number of threads allowed inside InnoDB. Value 0 will disable the thread throttling.", + NULL, NULL, 8, 0, 1000, 0); + +static MYSQL_SYSVAR_ULONG(thread_sleep_delay, srv_thread_sleep_delay, + PLUGIN_VAR_RQCMDARG, + "Time of innodb thread sleeping before joining InnoDB queue (usec). 
Value 0 disable a sleep", + NULL, NULL, 10000L, 0L, ~0L, 0); + +static MYSQL_SYSVAR_STR(data_file_path, innobase_data_file_path, + PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, + "Path to individual files and their sizes.", + NULL, NULL, NULL); + +static struct st_mysql_sys_var* innobase_system_variables[]= { + MYSQL_SYSVAR(additional_mem_pool_size), + MYSQL_SYSVAR(autoextend_increment), + MYSQL_SYSVAR(buffer_pool_size), + MYSQL_SYSVAR(checksums), + MYSQL_SYSVAR(commit_concurrency), + MYSQL_SYSVAR(concurrency_tickets), + MYSQL_SYSVAR(data_file_path), + MYSQL_SYSVAR(data_home_dir), + MYSQL_SYSVAR(doublewrite), + MYSQL_SYSVAR(fast_shutdown), + MYSQL_SYSVAR(file_io_threads), + MYSQL_SYSVAR(file_per_table), + MYSQL_SYSVAR(flush_log_at_trx_commit), + MYSQL_SYSVAR(flush_method), + MYSQL_SYSVAR(force_recovery), + MYSQL_SYSVAR(locks_unsafe_for_binlog), + MYSQL_SYSVAR(lock_wait_timeout), + MYSQL_SYSVAR(log_arch_dir), +#ifdef UNIV_LOG_ARCHIVE + MYSQL_SYSVAR(log_archive), +#endif /* UNIV_LOG_ARCHIVE */ + MYSQL_SYSVAR(log_buffer_size), + MYSQL_SYSVAR(log_file_size), + MYSQL_SYSVAR(log_files_in_group), + MYSQL_SYSVAR(log_group_home_dir), + MYSQL_SYSVAR(max_dirty_pages_pct), + MYSQL_SYSVAR(max_purge_lag), + MYSQL_SYSVAR(mirrored_log_groups), + MYSQL_SYSVAR(open_files), + MYSQL_SYSVAR(rollback_on_timeout), + MYSQL_SYSVAR(stats_on_metadata), + MYSQL_SYSVAR(status_file), + MYSQL_SYSVAR(support_xa), + MYSQL_SYSVAR(sync_spin_loops), + MYSQL_SYSVAR(table_locks), + MYSQL_SYSVAR(thread_concurrency), + MYSQL_SYSVAR(thread_sleep_delay), + NULL +}; + mysql_declare_plugin(innobase) { MYSQL_STORAGE_ENGINE_PLUGIN, @@ -7690,8 +7864,8 @@ mysql_declare_plugin(innobase) NULL, /* Plugin Deinit */ 0x0100 /* 1.0 */, innodb_status_variables_export,/* status variables */ - NULL, /* system variables */ - NULL /* config options */ + innobase_system_variables, /* system variables */ + NULL /* reserved */ } mysql_declare_plugin_end; diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index f5df362b490..f2c01f71399 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -189,8 +189,6 @@ class ha_innobase: public handler uint table_changes); }; -extern ulong innobase_fast_shutdown; -extern ulong innobase_large_page_size; extern long innobase_mirrored_log_groups, innobase_log_files_in_group; extern longlong innobase_buffer_pool_size, innobase_log_file_size; extern long innobase_log_buffer_size; @@ -202,16 +200,6 @@ extern long innobase_open_files; extern char *innobase_data_home_dir, *innobase_data_file_path; extern char *innobase_log_group_home_dir, *innobase_log_arch_dir; extern char *innobase_unix_file_flush_method; -/* The following variables have to be my_bool for SHOW VARIABLES to work */ -extern my_bool innobase_log_archive, - innobase_use_doublewrite, - innobase_use_checksums, - innobase_use_large_pages, - innobase_use_native_aio, - innobase_file_per_table, innobase_locks_unsafe_for_binlog, - innobase_rollback_on_timeout, - innobase_create_status_file, - innobase_stats_on_metadata; extern "C" { extern ulong srv_max_buf_pool_modified_pct; extern ulong srv_max_purge_lag; diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index fe36b0d1a01..e75c200cc3a 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -499,6 +499,14 @@ struct trx_struct{ ulint mysql_process_no;/* since in Linux, 'top' reports process id's and not thread id's, we store the process number too */ + ibool 
allow_duplicates;/* normally FALSE, but if the user + wants to update duplicate rows, + (in table inserts, for example) we + set this TRUE */ + ibool replace_duplicates;/* normally FALSE, but if the user + wants to replace duplicate rows, + (in table inserts, for example) we + set this TRUE */ /*------------------------------*/ ulint n_mysql_tables_in_use; /* number of Innobase tables used in the processing of the current diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index d31f5b7e792..298e3e6c433 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -694,10 +694,10 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) for (i= 0; i < table->s->keys; i++) { - struct st_plugin_int *parser= table->key_info[i].parser; + plugin_ref parser= table->key_info[i].parser; if (table->key_info[i].flags & HA_USES_PARSER) file->s->keyinfo[i].parser= - (struct st_mysql_ftparser *)parser->plugin->info; + (struct st_mysql_ftparser *)plugin_decl(parser)->info; table->key_info[i].block_size= file->s->keyinfo[i].block_length; } my_errno= 0; @@ -723,7 +723,7 @@ int ha_myisam::close(void) int ha_myisam::write_row(byte * buf) { - statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); /* If we have a timestamp column, update it to the current time */ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) @@ -1596,7 +1596,7 @@ bool ha_myisam::is_crashed() const int ha_myisam::update_row(const byte * old_data, byte * new_data) { - statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_update_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return mi_update(file,old_data,new_data); @@ -1604,7 +1604,7 @@ int ha_myisam::update_row(const byte * old_data, byte * new_data) int ha_myisam::delete_row(const byte * buf) { - statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_delete_count); return mi_delete(file,buf); } @@ -1612,8 +1612,7 @@ int ha_myisam::index_read(byte *buf, const byte *key, key_part_map keypart_map, enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); int error=mi_rkey(file, buf, active_index, key, keypart_map, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1623,8 +1622,7 @@ int ha_myisam::index_read_idx(byte *buf, uint index, const byte *key, key_part_map keypart_map, enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); int error=mi_rkey(file, buf, index, key, keypart_map, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1635,8 +1633,7 @@ int ha_myisam::index_read_last(byte *buf, const byte *key, { DBUG_ENTER("ha_myisam::index_read_last"); DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); int error=mi_rkey(file, buf, active_index, key, keypart_map, HA_READ_PREFIX_LAST); table->status=error ? 
STATUS_NOT_FOUND: 0; @@ -1646,8 +1643,7 @@ int ha_myisam::index_read_last(byte *buf, const byte *key, int ha_myisam::index_next(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=mi_rnext(file,buf,active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1656,8 +1652,7 @@ int ha_myisam::index_next(byte * buf) int ha_myisam::index_prev(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_prev_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_prev_count); int error=mi_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1666,8 +1661,7 @@ int ha_myisam::index_prev(byte * buf) int ha_myisam::index_first(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_first_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_first_count); int error=mi_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1676,8 +1670,7 @@ int ha_myisam::index_first(byte * buf) int ha_myisam::index_last(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_last_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_last_count); int error=mi_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1688,8 +1681,7 @@ int ha_myisam::index_next_same(byte * buf, uint length __attribute__((unused))) { DBUG_ASSERT(inited==INDEX); - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=mi_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1705,8 +1697,7 @@ int ha_myisam::rnd_init(bool scan) int ha_myisam::rnd_next(byte *buf) { - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); int error=mi_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1719,8 +1710,7 @@ int ha_myisam::restart_rnd_next(byte *buf, byte *pos) int ha_myisam::rnd_pos(byte * buf, byte *pos) { - statistic_increment(table->in_use->status_var.ha_read_rnd_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_count); int error=mi_rrnd(file, buf, my_get_ptr(pos,ref_length)); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 96f7db6e633..5df3e7799d0 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -148,7 +148,7 @@ int ha_myisammrg::close(void) int ha_myisammrg::write_row(byte * buf) { - statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); if (file->merge_insert_method == MERGE_INSERT_DISABLED || !file->tables) return (HA_ERR_TABLE_READONLY); @@ -166,7 +166,7 @@ int ha_myisammrg::write_row(byte * buf) int ha_myisammrg::update_row(const byte * old_data, byte * new_data) { - statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_update_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return myrg_update(file,old_data,new_data); @@ -174,7 +174,7 @@ int ha_myisammrg::update_row(const byte * old_data, byte * new_data) int ha_myisammrg::delete_row(const byte * buf) { - statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); + ha_statistic_increment(&SSV::ha_delete_count); return myrg_delete(file,buf); } @@ -182,8 +182,7 @@ int ha_myisammrg::index_read(byte * buf, const byte * key, key_part_map keypart_map, enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); int error=myrg_rkey(file,buf,active_index, key, keypart_map, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -193,8 +192,7 @@ int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key, key_part_map keypart_map, enum ha_rkey_function find_flag) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); int error=myrg_rkey(file,buf,index, key, keypart_map, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -203,8 +201,7 @@ int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key, int ha_myisammrg::index_read_last(byte * buf, const byte * key, key_part_map keypart_map) { - statistic_increment(table->in_use->status_var.ha_read_key_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_key_count); int error=myrg_rkey(file,buf,active_index, key, keypart_map, HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; @@ -213,8 +210,7 @@ int ha_myisammrg::index_read_last(byte * buf, const byte * key, int ha_myisammrg::index_next(byte * buf) { - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=myrg_rnext(file,buf,active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -222,8 +218,7 @@ int ha_myisammrg::index_next(byte * buf) int ha_myisammrg::index_prev(byte * buf) { - statistic_increment(table->in_use->status_var.ha_read_prev_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_prev_count); int error=myrg_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -231,8 +226,7 @@ int ha_myisammrg::index_prev(byte * buf) int ha_myisammrg::index_first(byte * buf) { - statistic_increment(table->in_use->status_var.ha_read_first_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_first_count); int error=myrg_rfirst(file, buf, active_index); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; @@ -240,8 +234,7 @@ int ha_myisammrg::index_first(byte * buf) int ha_myisammrg::index_last(byte * buf) { - statistic_increment(table->in_use->status_var.ha_read_last_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_last_count); int error=myrg_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -251,8 +244,7 @@ int ha_myisammrg::index_next_same(byte * buf, const byte *key __attribute__((unused)), uint length __attribute__((unused))) { - statistic_increment(table->in_use->status_var.ha_read_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_next_count); int error=myrg_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -267,8 +259,7 @@ int ha_myisammrg::rnd_init(bool scan) int ha_myisammrg::rnd_next(byte *buf) { - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); int error=myrg_rrnd(file, buf, HA_OFFSET_ERROR); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -277,8 +268,7 @@ int ha_myisammrg::rnd_next(byte *buf) int ha_myisammrg::rnd_pos(byte * buf, byte *pos) { - statistic_increment(table->in_use->status_var.ha_read_rnd_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_count); int error=myrg_rrnd(file, buf, my_get_ptr(pos,ref_length)); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -615,7 +605,6 @@ static int myisammrg_init(void *p) myisammrg_hton= (handlerton *)p; - myisammrg_hton->state= SHOW_OPTION_YES; myisammrg_hton->db_type= DB_TYPE_MRG_MYISAM; myisammrg_hton->create= myisammrg_create_handler; myisammrg_hton->panic= myisammrg_panic; diff --git a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp index 94768e6ae52..c1bfdee9342 100644 --- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp +++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp @@ -612,10 +612,11 @@ static my_bool parse_mycnf_opt(int, const struct my_option * opt, char * value) { + long *app_type= (long*) &opt->app_type; if(opt->comment) - ((struct my_option *)opt)->app_type++; + (*app_type)++; else - ((struct my_option *)opt)->app_type = order++; + *app_type = order++; return 0; } @@ -948,22 +949,6 @@ end: template class Vector<struct my_option>; -#if 0 -struct my_option -{ - const char *name; /* Name of the option */ - int id; /* unique id or short option */ - const char *comment; /* option comment, for autom. --help */ - gptr *value; /* The variable value */ - gptr *u_max_value; /* The user def. max variable value */ - const char **str_values; /* Pointer to possible values */ - ulong var_type; - enum get_opt_arg_type arg_type; - longlong def_value; /* Default value */ - longlong min_value; /* Min allowed value */ - longlong max_value; /* Max allowed value */ - longlong sub_size; /* Subtract this from given value */ - long block_size; /* Value should be a mult. of this */ - int app_type; /* To be used by an application */ -}; -#endif +/* + See include/my_getopt.h for the declaration of struct my_option +*/ |
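
The InnoDB hunk above is the heart of the change: the engine's settings move from ad-hoc globals to the 5.1 plugin system-variable API. Each option is declared with a MYSQL_SYSVAR_* macro, the declarations are gathered into a NULL-terminated st_mysql_sys_var array, and that array is passed in the mysql_declare_plugin slot that previously held NULL (the trailing descriptor field is now documented as reserved). A minimal sketch of the same pattern for a hypothetical engine follows; the plugin prefix, variable names and help text are illustrative, not taken from this commit:

    #include <mysql/plugin.h>

    /* Backing storage; the server reads and writes these directly. */
    static unsigned long example_sleep_delay;
    static char *example_data_path;

    /* --example-sleep-delay=N: unsigned long with default/min/max/block size. */
    static MYSQL_SYSVAR_ULONG(sleep_delay, example_sleep_delay,
      PLUGIN_VAR_RQCMDARG,
      "Microseconds a worker sleeps before retrying. Value 0 disables the sleep.",
      NULL, NULL,          /* check and update callbacks */
      10000L, 0L, ~0L, 0); /* default, minimum, maximum, block size */

    /* --example-data-path=PATH: read-only string, settable only at startup. */
    static MYSQL_SYSVAR_STR(data_path, example_data_path,
      PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
      "Path to the engine's data files.",
      NULL, NULL, NULL);

    /* NULL-terminated list handed to the server via the plugin descriptor. */
    static struct st_mysql_sys_var *example_system_variables[]= {
      MYSQL_SYSVAR(sleep_delay),
      MYSQL_SYSVAR(data_path),
      NULL
    };

With the array wired into the descriptor, the options are registered under the plugin's prefix, which is what allows the ha_innodb.h hunk to delete the old extern ulong/my_bool globals that previously existed only so SHOW VARIABLES could find them.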
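
Most of the Federated, HEAP, MyISAM and MERGE hunks are one mechanical substitution, repeated per handler method: instead of each method poking table->in_use->status_var and naming &LOCK_status itself, it calls the handler-level ha_statistic_increment() wrapper with a pointer-to-member (&SSV::ha_..._count) that selects which per-session counter to bump, leaving the wrapper to decide whose counter it is and how the update is done. The stand-alone sketch below shows just the pointer-to-member mechanism; the struct, field and function names are simplified stand-ins, not the server's definitions:

    #include <cstdio>

    /* Per-session counters: one instance per connection. */
    struct session_status {
      unsigned long ha_write_count;
      unsigned long ha_read_key_count;
    };

    struct session {
      session_status status_var;
    };

    /* Callers name the counter as a pointer-to-member; the wrapper picks
       the session whose copy gets incremented. */
    static void ha_statistic_increment(session *s,
                                       unsigned long session_status::*counter)
    {
      (s->status_var.*counter)++;
    }

    int main()
    {
      session s= {};
      ha_statistic_increment(&s, &session_status::ha_write_count);    /* write_row */
      ha_statistic_increment(&s, &session_status::ha_read_key_count); /* index_read */
      std::printf("writes=%lu, key reads=%lu\n",
                  s.status_var.ha_write_count, s.status_var.ha_read_key_count);
      return 0;
    }

The payoff visible in the diff is entirely at the call sites: every handler method loses the direct reference to LOCK_status and the long status_var expression, and any later change to the counting policy only has to touch the wrapper.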
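
The InitConfigFileParser.cpp change is a small const-correctness cleanup: the my_option callback receives a const pointer but must update the app_type bookkeeping field, and rather than casting the whole struct pointer to non-const at every assignment, the new code takes the address of that single field once into a local pointer and writes through it. An illustrative sketch with simplified types (option_desc and note_option are stand-ins, not NDB or my_getopt names):

    struct option_desc {
      const char *comment;   /* option help text */
      long        app_type;  /* scratch field reserved for the application */
    };

    /* Old style (repeated at each write):
         ((option_desc *) opt)->app_type++;
       New style: cast away const once, then use the local pointer. */
    static int note_option(const option_desc *opt, long *order)
    {
      long *app_type= (long *) &opt->app_type;
      if (opt->comment)
        (*app_type)++;            /* bump the per-option counter */
      else
        *app_type= (*order)++;    /* otherwise record an ordering number */
      return 0;
    }

The #if 0 copy of struct my_option at the bottom of the file is dropped at the same time in favour of a one-line pointer to include/my_getopt.h, which avoids carrying a second, potentially stale copy of the declaration.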