author    | unknown <guilhem@mysql.com> | 2006-06-02 22:22:59 +0200
committer | unknown <guilhem@mysql.com> | 2006-06-02 22:22:59 +0200
commit    | 78e288b4a7efc9fc4bfd0cc8949e265be48f1ee7 (patch)
tree      | 025336cab0c9822d03d387ce26f536a8b0d0712e
parent    | 0c68b7104fb92ff0998c17d2b71534ec23a7f8ad (diff)
parent    | e63f3779d4b19acddbc561e989293b7b4f0559d4 (diff)
download  | mariadb-git-78e288b4a7efc9fc4bfd0cc8949e265be48f1ee7.tar.gz
Merge gbichot@bk-internal.mysql.com:/home/bk/mysql-5.1-new
into mysql.com:/home/mysql_src/mysql-5.1-new-WL3146-handler
sql/ha_berkeley.cc:
Auto merged
sql/ha_berkeley.h:
Auto merged
sql/ha_heap.cc:
Auto merged
sql/ha_innodb.cc:
Auto merged
sql/ha_innodb.h:
Auto merged
sql/ha_myisam.cc:
Auto merged
sql/ha_ndbcluster.cc:
Auto merged
sql/ha_ndbcluster.h:
Auto merged
sql/ha_partition.cc:
Auto merged
sql/handler.cc:
Auto merged
sql/handler.h:
Auto merged
sql/log.cc:
Auto merged
sql/log_event.cc:
Auto merged
sql/sql_insert.cc:
Auto merged
sql/sql_load.cc:
Auto merged
sql/sql_select.cc:
Auto merged
sql/sql_table.cc:
Auto merged
storage/archive/ha_archive.cc:
Auto merged
storage/archive/ha_archive.h:
Auto merged
-rw-r--r-- | sql/ha_berkeley.cc            | 17
-rw-r--r-- | sql/ha_berkeley.h             | 5
-rw-r--r-- | sql/ha_heap.cc                | 9
-rw-r--r-- | sql/ha_heap.h                 | 5
-rw-r--r-- | sql/ha_innodb.cc              | 25
-rw-r--r-- | sql/ha_innodb.h               | 5
-rw-r--r-- | sql/ha_myisam.cc              | 19
-rw-r--r-- | sql/ha_myisam.h               | 5
-rw-r--r-- | sql/ha_ndbcluster.cc          | 13
-rw-r--r-- | sql/ha_ndbcluster.h           | 7
-rw-r--r-- | sql/ha_partition.cc           | 55
-rw-r--r-- | sql/ha_partition.h            | 6
-rw-r--r-- | sql/handler.cc                | 119
-rw-r--r-- | sql/handler.h                 | 23
-rw-r--r-- | sql/log.cc                    | 5
-rw-r--r-- | sql/log_event.cc              | 4
-rw-r--r-- | sql/sql_insert.cc             | 21
-rw-r--r-- | sql/sql_load.cc               | 8
-rw-r--r-- | sql/sql_select.cc             | 2
-rw-r--r-- | sql/sql_table.cc              | 4
-rw-r--r-- | storage/archive/ha_archive.cc | 8
-rw-r--r-- | storage/archive/ha_archive.h  | 5
22 files changed, 303 insertions, 67 deletions
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 9a144066057..317d85da742 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -2232,8 +2232,12 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key,
 }
 
-ulonglong ha_berkeley::get_auto_increment()
+void ha_berkeley::get_auto_increment(ulonglong offset, ulonglong increment,
+                                     ulonglong nb_desired_values,
+                                     ulonglong *first_value,
+                                     ulonglong *nb_reserved_values)
 {
+  /* Ideally in case of real error (not "empty table") nr should be ~ULL(0) */
   ulonglong nr=1;                               // Default if error or new key
   int error;
   (void) ha_berkeley::extra(HA_EXTRA_KEYREAD);
@@ -2244,9 +2248,18 @@ ulonglong ha_berkeley::get_auto_increment()
   if (!table_share->next_number_key_offset)
   {                                             // Autoincrement at key-start
     error=ha_berkeley::index_last(table->record[1]);
+    /* has taken read lock on page of max key so reserves to infinite */
+    *nb_reserved_values= ULONGLONG_MAX;
   }
   else
   {
+    /*
+      MySQL needs to call us for next row: assume we are inserting ("a",null)
+      here, we return 3, and next this statement will want to insert ("b",null):
+      there is no reason why ("b",3+1) would be the good row to insert: maybe it
+      already exists, maybe 3+1 is too large...
+    */
+    *nb_reserved_values= 1;
     DBT row,old_key;
     bzero((char*) &row,sizeof(row));
     KEY *key_info= &table->key_info[active_index];
@@ -2287,7 +2300,7 @@ ulonglong ha_berkeley::get_auto_increment()
       table->next_number_field->val_int_offset(table_share->rec_buff_length)+1;
   ha_berkeley::index_end();
   (void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD);
-  return nr;
+  *first_value= nr;
 }
 
 void ha_berkeley::print_error(int error, myf errflag)
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index 50a2aec3963..e6eb32fee52 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -149,7 +149,10 @@ class ha_berkeley: public handler
     int5store(to,share->auto_ident);
     pthread_mutex_unlock(&share->mutex);
   }
-  ulonglong get_auto_increment();
+  virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+                                  ulonglong nb_desired_values,
+                                  ulonglong *first_value,
+                                  ulonglong *nb_reserved_values);
   void print_error(int error, myf errflag);
   uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; }
   bool primary_key_is_clustered() { return true; }
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 15cec9ec4a6..84804d2e609 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -663,10 +663,15 @@ void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
     create_info->auto_increment_value= auto_increment_value;
 }
 
-ulonglong ha_heap::get_auto_increment()
+void ha_heap::get_auto_increment(ulonglong offset, ulonglong increment,
+                                 ulonglong nb_desired_values,
+                                 ulonglong *first_value,
+                                 ulonglong *nb_reserved_values)
 {
   ha_heap::info(HA_STATUS_AUTO);
-  return auto_increment_value;
+  *first_value= auto_increment_value;
+  /* such table has only table-level locking so reserves up to +inf */
+  *nb_reserved_values= ULONGLONG_MAX;
 }
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index 9b9b7f90d90..40d39bfad42 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -71,7 +71,10 @@ public:
   int write_row(byte * buf);
   int update_row(const byte * old_data, byte * new_data);
   int delete_row(const byte * buf);
-  ulonglong get_auto_increment();
+  virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+                                  ulonglong nb_desired_values,
+                                  ulonglong *first_value,
+                                  ulonglong *nb_reserved_values);
   int index_read(byte * buf, const byte * key, uint key_len,
                  enum ha_rkey_function find_flag);
   int index_read_idx(byte * buf, uint idx, const byte * key,
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 58249cf9f9a..b2fd53c8ea6 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -6946,17 +6946,21 @@ func_exit_early:
 	return(error);
 }
 
-/***********************************************************************
+/*******************************************************************************
 This function initializes the auto-inc counter if it has not been
 initialized yet. This function does not change the value of the auto-inc
 counter if it already has been initialized. Returns the value of the
-auto-inc counter. */
+auto-inc counter in *first_value, and ULONGLONG_MAX in *nb_reserved_values (as
+we have a table-level lock). offset, increment, nb_desired_values are ignored.
+*first_value is set to -1 if error (deadlock or lock wait timeout) */
 
-ulonglong
-ha_innobase::get_auto_increment()
-/*=============================*/
-	/* out: auto-increment column value, -1 if error
-	(deadlock or lock wait timeout) */
+void ha_innobase::get_auto_increment(
+/*=================================*/
+	ulonglong	offset,              /* in */
+	ulonglong	increment,           /* in */
+	ulonglong	nb_desired_values,   /* in */
+	ulonglong	*first_value,        /* out */
+	ulonglong	*nb_reserved_values) /* out */
 {
 	longlong	nr;
 	int		error;
@@ -6971,10 +6975,13 @@ ha_innobase::get_auto_increment()
 		ut_print_timestamp(stderr);
 		sql_print_error("Error %lu in ::get_auto_increment()",
 				(ulong) error);
-		return(~(ulonglong) 0);
+		*first_value= (~(ulonglong) 0);
+		return;
 	}
 
-	return((ulonglong) nr);
+	*first_value= (ulonglong) nr;
+	/* table-level autoinc lock reserves up to +inf */
+	*nb_reserved_values= ULONGLONG_MAX;
 }
 
 /* See comment in handler.h */
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 638ecc432b9..0f58589cf43 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -181,7 +181,10 @@ class ha_innobase: public handler
 	THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
 					enum thr_lock_type lock_type);
 	void init_table_handle_for_HANDLER();
-	ulonglong get_auto_increment();
+	virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+					ulonglong nb_desired_values,
+					ulonglong *first_value,
+					ulonglong *nb_reserved_values);
 	int reset_auto_increment(ulonglong value);
 
 	virtual bool get_error_message(int error, String *buf);
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index d11eb7aa891..bf0f85a5815 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -1638,7 +1638,10 @@ int ha_myisam::rename_table(const char * from, const char * to)
 }
 
-ulonglong ha_myisam::get_auto_increment()
+void ha_myisam::get_auto_increment(ulonglong offset, ulonglong increment,
+                                   ulonglong nb_desired_values,
+                                   ulonglong *first_value,
+                                   ulonglong *nb_reserved_values)
 {
   ulonglong nr;
   int error;
@@ -1647,7 +1650,10 @@ ulonglong ha_myisam::get_auto_increment()
   if (!table->s->next_number_key_offset)
   {                                             // Autoincrement at key-start
     ha_myisam::info(HA_STATUS_AUTO);
-    return auto_increment_value;
+    *first_value= auto_increment_value;
+    /* MyISAM has only table-level lock, so reserves to +inf */
+    *nb_reserved_values= ULONGLONG_MAX;
+    return;
   }
 
   /* it's safe to call the following if bulk_insert isn't on */
@@ -1668,7 +1674,14 @@ ulonglong ha_myisam::get_auto_increment()
                     val_int_offset(table->s->rec_buff_length)+1);
   }
   extra(HA_EXTRA_NO_KEYREAD);
-  return nr;
+  *first_value= nr;
+  /*
+    MySQL needs to call us for next row: assume we are inserting ("a",null)
+    here, we return 3, and next this statement will want to insert ("b",null):
+    there is no reason why ("b",3+1) would be the good row to insert: maybe it
+    already exists, maybe 3+1 is too large...
+  */
+  *nb_reserved_values= 1;
 }
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 86efed27478..c5e712d4256 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -113,7 +113,10 @@ class ha_myisam: public handler
   int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
   THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
                              enum thr_lock_type lock_type);
-  ulonglong get_auto_increment();
+  virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+                                  ulonglong nb_desired_values,
+                                  ulonglong *first_value,
+                                  ulonglong *nb_reserved_values);
   int rename_table(const char * from, const char * to);
   int delete_table(const char *name);
   int check(THD* thd, HA_CHECK_OPT* check_opt);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 62a5502d635..e6d1bbaae12 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -5253,7 +5253,10 @@ int ha_ndbcluster::delete_table(const char *name)
 }
 
-ulonglong ha_ndbcluster::get_auto_increment()
+void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
+                                       ulonglong nb_desired_values,
+                                       ulonglong *first_value,
+                                       ulonglong *nb_reserved_values)
 {
   int cache_size;
   Uint64 auto_value;
@@ -5287,9 +5290,13 @@ ulonglong ha_ndbcluster::get_auto_increment()
     const NdbError err= ndb->getNdbError();
     sql_print_error("Error %lu in ::get_auto_increment(): %s",
                     (ulong) err.code, err.message);
-    DBUG_RETURN(~(ulonglong) 0);
+    *first_value= ~(ulonglong) 0;
+    DBUG_VOID_RETURN;
   }
-  DBUG_RETURN((longlong)auto_value);
+  *first_value= (longlong)auto_value;
+  /* From the point of view of MySQL, NDB reserves one row at a time */
+  *nb_reserved_values= 1;
+  DBUG_VOID_RETURN;
 }
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 3c8a4d260a3..985be17c1fb 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -803,7 +803,10 @@ private:
   int set_index_key(NdbOperation *, const KEY *key_info, const byte *key_ptr);
   void print_results();
 
-  ulonglong get_auto_increment();
+  virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+                                  ulonglong nb_desired_values,
+                                  ulonglong *first_value,
+                                  ulonglong *nb_reserved_values);
   int ndb_err(NdbTransaction*);
   bool uses_blob_value();
 
@@ -865,7 +868,7 @@ private:
   bool m_primary_key_update;
   bool m_write_op;
   bool m_ignore_no_key;
-  ha_rows m_rows_to_insert;
+  ha_rows m_rows_to_insert; // TODO: merge it with handler::estimation_rows_to_insert?
   ha_rows m_rows_inserted;
   ha_rows m_bulk_insert_rows;
   ha_rows m_rows_changed;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 7fa5b89abf8..418c3654531 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2775,7 +2775,7 @@ void ha_partition::start_bulk_insert(ha_rows rows)
   file= m_file;
   do
   {
-    (*file)->start_bulk_insert(rows);
+    (*file)->ha_start_bulk_insert(rows);
   } while (*(++file));
   DBUG_VOID_RETURN;
 }
@@ -2802,7 +2802,7 @@ int ha_partition::end_bulk_insert()
   do
   {
     int tmp;
-    if ((tmp= (*file)->end_bulk_insert()))
+    if ((tmp= (*file)->ha_end_bulk_insert()))
       error= tmp;
   } while (*(++file));
   DBUG_RETURN(error);
 }
@@ -4127,8 +4127,11 @@ void ha_partition::info(uint flag)
   if (flag & HA_STATUS_AUTO)
   {
+    ulonglong nb_reserved_values;
     DBUG_PRINT("info", ("HA_STATUS_AUTO"));
-    auto_increment_value= get_auto_increment();
+    /* we don't want to reserve any values, it's pure information */
+    get_auto_increment(0, 0, 0, &auto_increment_value, &nb_reserved_values);
+    release_auto_increment();
   }
   if (flag & HA_STATUS_VARIABLE)
   {
@@ -5274,19 +5277,55 @@ void ha_partition::restore_auto_increment()
   partitions.
 */
 
-ulonglong ha_partition::get_auto_increment()
+void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
+                                      ulonglong nb_desired_values,
+                                      ulonglong *first_value,
+                                      ulonglong *nb_reserved_values)
 {
-  ulonglong auto_inc, max_auto_inc= 0;
+  ulonglong first_value_part, last_value_part, nb_reserved_values_part,
+    last_value;
   DBUG_ENTER("ha_partition::get_auto_increment");
 
+  *first_value= 0;
+  last_value= ULONGLONG_MAX;
   for (uint i= 0; i < m_tot_parts; i++)
   {
-    auto_inc= m_file[i]->get_auto_increment();
-    set_if_bigger(max_auto_inc, auto_inc);
+    m_file[i]->get_auto_increment(offset, increment, nb_desired_values,
+                                  &first_value_part, &nb_reserved_values_part);
+    if (first_value_part == ~(ulonglong)(0)) // error in one partition
+    {
+      *first_value= first_value_part;
+      break;
+    }
+    /*
+      Partition has reserved an interval. Intersect it with the intervals
+      already reserved for the previous partitions.
+    */
+    last_value_part= (nb_reserved_values_part == ULONGLONG_MAX) ?
+      ULONGLONG_MAX : (first_value_part + nb_reserved_values_part * increment);
+    set_if_bigger(*first_value, first_value_part);
+    set_if_smaller(last_value, last_value_part);
+  }
+  if (last_value < *first_value) /* empty intersection, error */
+  {
+    *first_value= ~(ulonglong)(0);
   }
-  DBUG_RETURN(max_auto_inc);
+  *nb_reserved_values= (last_value == ULONGLONG_MAX) ?
+    ULONGLONG_MAX : ((last_value - *first_value) / increment);
+
+  DBUG_VOID_RETURN;
 }
 
+void ha_partition::release_auto_increment()
+{
+  DBUG_ENTER("ha_partition::release_auto_increment");
+
+  for (uint i= 0; i < m_tot_parts; i++)
+  {
+    m_file[i]->release_auto_increment();
+  }
+  DBUG_VOID_RETURN;
+}
 
 /****************************************************************************
                 MODULE initialise handler for HANDLER call
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 1443a20133c..44c50e0af3f 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -815,7 +815,11 @@ public:
     -------------------------------------------------------------------------
   */
   virtual void restore_auto_increment();
-  virtual ulonglong get_auto_increment();
+  virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+                                  ulonglong nb_desired_values,
+                                  ulonglong *first_value,
+                                  ulonglong *nb_reserved_values);
+  virtual void release_auto_increment();
 
   /*
     -------------------------------------------------------------------------
diff --git a/sql/handler.cc b/sql/handler.cc
index 292e26109f8..06753f82c4e 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1595,7 +1595,7 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
   Update the auto_increment field if necessary
 
   SYNOPSIS
-     update_auto_increment()
+    update_auto_increment()
 
   RETURN
     0	ok
@@ -1624,8 +1624,9 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
     statement. For the following rows we generate new numbers based on the
     last used number.
 
-  - thd->next_insert_id != 0.  This happens when we have read a statement
-    from the binary log or when one has used SET LAST_INSERT_ID=#.
+  - thd->next_insert_id != 0.  This happens when we have read an Intvar event
+    of type INSERT_ID_EVENT from the binary log or when one has used SET
+    INSERT_ID=#.
 
     In this case we will set the column to the value of next_insert_id.
     The next row will be given the id
@@ -1641,8 +1642,20 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
     start counting from the inserted value.
 
   thd->next_insert_id is cleared after it's been used for a statement.
+
+  TODO
+
+  Replace all references to "next number" or NEXT_NUMBER to
+  "auto_increment", everywhere (see below: there is
+  table->auto_increment_field_not_null, and there also exists
+  table->next_number_field, it's not consistent).
+
 */
+#define AUTO_INC_DEFAULT_NB_ROWS 1 // Some prefer 1024 here
+#define AUTO_INC_DEFAULT_NB_MAX_BITS 16
+#define AUTO_INC_DEFAULT_NB_MAX ((1 << AUTO_INC_DEFAULT_NB_MAX_BITS) - 1)
+
 bool handler::update_auto_increment()
 {
   ulonglong nr;
@@ -1658,17 +1671,24 @@ bool handler::update_auto_increment()
   */
   thd->prev_insert_id= thd->next_insert_id;
   auto_increment_field_not_null= table->auto_increment_field_not_null;
-  table->auto_increment_field_not_null= FALSE;
+  table->auto_increment_field_not_null= FALSE; // to reset for next row
 
   if ((nr= table->next_number_field->val_int()) != 0 ||
       auto_increment_field_not_null &&
       thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO)
   {
-    /* Clear flag for next row */
-    /* Mark that we didn't generate a new value **/
+    /*
+      The user did specify a value for the auto_inc column, we don't generate
+      a new value, write it down.
+    */
    auto_increment_column_changed=0;
-    /* Update next_insert_id if we have already generated a value */
+    /*
+      Update next_insert_id if we had already generated a value in this
+      statement (case of INSERT VALUES(null),(3763),(null):
+      the last NULL needs to insert 3764, not the value of the first NULL plus
+      1).
+    */
    if (thd->clear_next_insert_id && nr >= thd->next_insert_id)
    {
      if (variables->auto_increment_increment != 1)
@@ -1682,9 +1702,53 @@
  }
  if (!(nr= thd->next_insert_id))
  {
-    if ((nr= get_auto_increment()) == ~(ulonglong) 0)
+    ulonglong nb_desired_values= 1, nb_reserved_values;
+#ifdef TO_BE_ENABLED_SOON
+    /*
+      Reserved intervals will be stored in "THD::auto_inc_intervals".
+      handler::estimation_rows_to_insert will be the argument passed by
+      handler::ha_start_bulk_insert().
+    */
+    uint estimation_known= test(estimation_rows_to_insert > 0);
+    uint nb_already_reserved_intervals= thd->auto_inc_intervals.nb_elements();
+    /*
+      If an estimation was given to the engine:
+      - use it.
+      - if we already reserved numbers, it means the estimation was
+        not accurate, then we'll reserve 2*AUTO_INC_DEFAULT_NB_VALUES the 2nd
+        time, twice that the 3rd time etc.
+      If no estimation was given, use those increasing defaults from the
+      start, starting from AUTO_INC_DEFAULT_NB_VALUES.
+      Don't go beyond a max to not reserve "way too much" (because reservation
+      means potentially losing unused values).
+    */
+    if (nb_already_reserved_intervals == 0 && estimation_known)
+      nb_desired_values= estimation_rows_to_insert;
+    else /* go with the increasing defaults */
+    {
+      /* avoid overflow in formula, with this if() */
+      if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS)
+      {
+        nb_desired_values= AUTO_INC_DEFAULT_NB_VALUES *
+          (1 << nb_already_reserved_intervals);
+        set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX);
+      }
+      else
+        nb_desired_values= AUTO_INC_DEFAULT_NB_MAX;
+    }
+#endif
+    /* This call ignores all its parameters but nr, currently */
+    get_auto_increment(variables->auto_increment_offset,
+                       variables->auto_increment_increment,
+                       nb_desired_values, &nr,
+                       &nb_reserved_values);
+    if (nr == ~(ulonglong) 0)
      result= 1;                                 // Mark failure
+    /*
+      That should not be needed when engines actually use offset and increment
+      above.
+    */
    if (variables->auto_increment_increment != 1)
      nr= next_insert_id(nr-1, variables);
    /*
@@ -1742,7 +1806,29 @@ void handler::restore_auto_increment()
 }
 
-ulonglong handler::get_auto_increment()
+/*
+  Reserves an interval of auto_increment values from the handler.
+
+  SYNOPSIS
+    get_auto_increment()
+    offset
+    increment
+    nb_desired_values   how many values we want
+    first_value         (OUT) the first value reserved by the handler
+    nb_reserved_values  (OUT) how many values the handler reserved
+
+  offset and increment means that we want values to be of the form
+  offset + N * increment, where N>=0 is integer.
+  If the function sets *first_value to ~(ulonglong)0 it means an error.
+  If the function sets *nb_reserved_values to ULONGLONG_MAX it means it has
+  reserved to "positive infinite".
+
+*/
+
+void handler::get_auto_increment(ulonglong offset, ulonglong increment,
+                                 ulonglong nb_desired_values,
+                                 ulonglong *first_value,
+                                 ulonglong *nb_reserved_values)
 {
   ulonglong nr;
   int error;
@@ -1752,6 +1838,12 @@ ulonglong handler::get_auto_increment()
   if (!table->s->next_number_key_offset)
   {                                             // Autoincrement at key-start
     error=index_last(table->record[1]);
+    /*
+      MySQL implicitely assumes such method does locking (as MySQL decides to
+      use nr+increment without checking again with the handler, in
+      handler::update_auto_increment()), so reserves to infinite.
+    */
+    *nb_reserved_values= ULONGLONG_MAX;
   }
   else
   {
@@ -1761,6 +1853,13 @@
                table->s->next_number_key_offset);
     error= index_read(table->record[1], key, table->s->next_number_key_offset,
                       HA_READ_PREFIX_LAST);
+    /*
+      MySQL needs to call us for next row: assume we are inserting ("a",null)
+      here, we return 3, and next this statement will want to insert
+      ("b",null): there is no reason why ("b",3+1) would be the good row to
+      insert: maybe it already exists, maybe 3+1 is too large...
+    */
+    *nb_reserved_values= 1;
   }
 
   if (error)
@@ -1770,7 +1869,7 @@
            val_int_offset(table->s->rec_buff_length)+1);
   index_end();
   (void) extra(HA_EXTRA_NO_KEYREAD);
-  return nr;
+  *first_value= nr;
 }
diff --git a/sql/handler.h b/sql/handler.h
index 6f19e65b2c5..73fa8cb8c7d 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -837,6 +837,9 @@ class handler :public Sql_alloc
 private:
   virtual int reset() { return extra(HA_EXTRA_RESET); }
+  ha_rows estimation_rows_to_insert;
+  virtual void start_bulk_insert(ha_rows rows) {}
+  virtual int end_bulk_insert() {return 0; }
 public:
   const handlerton *ht;                 /* storage engine of this handler */
   byte *ref;				/* Pointer to current row */
@@ -881,7 +884,7 @@ public:
   MY_BITMAP *write_set;
 
   handler(const handlerton *ht_arg, TABLE_SHARE *share_arg)
-    :table_share(share_arg), ht(ht_arg),
+    :table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg),
     ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
     delete_length(0), auto_increment_value(0),
     records(0), deleted(0), mean_rec_length(0),
@@ -1302,7 +1305,11 @@ public:
   */
   virtual int delete_all_rows()
   { return (my_errno=HA_ERR_WRONG_COMMAND); }
-  virtual ulonglong get_auto_increment();
+  virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+                                  ulonglong nb_desired_values,
+                                  ulonglong *first_value,
+                                  ulonglong *nb_reserved_values);
+  virtual void release_auto_increment() { return; };
   virtual void restore_auto_increment();
 
   /*
@@ -1363,8 +1370,16 @@ public:
   virtual int disable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
   virtual int enable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
   virtual int indexes_are_disabled(void) {return 0;}
-  virtual void start_bulk_insert(ha_rows rows) {}
-  virtual int end_bulk_insert() {return 0; }
+  void ha_start_bulk_insert(ha_rows rows)
+  {
+    estimation_rows_to_insert= rows;
+    start_bulk_insert(rows);
+  }
+  int ha_end_bulk_insert()
+  {
+    estimation_rows_to_insert= 0;
+    return end_bulk_insert();
+  }
   virtual int discard_or_import_tablespace(my_bool discard)
   {return HA_ERR_WRONG_COMMAND;}
   virtual int net_read_dump(NET* net) { return HA_ERR_WRONG_COMMAND; }
diff --git a/sql/log.cc b/sql/log.cc
index 86a29feb026..6fbb893578b 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2915,6 +2915,11 @@ bool MYSQL_LOG::write(Log_event *event_info)
     }
     if (thd->insert_id_used)
     {
+      /*
+        If the auto_increment was second in a table's index (possible with
+        MyISAM or BDB) (table->next_number_key_offset != 0), such event is
+        in fact not necessary. We could avoid logging it.
+      */
       Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id);
       if (e.write(file))
         goto err;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index cbf1875c9ec..0f8d4146917 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -6130,7 +6130,7 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table)
     how many rows are going to be inserted, then it can allocate needed memory
     from the start.
   */
-  table->file->start_bulk_insert(0);
+  table->file->ha_start_bulk_insert(0);
   /*
     We need TIMESTAMP_NO_AUTO_SET otherwise ha_write_row() will not use fill
     any TIMESTAMP column with data from the row but instead will use
@@ -6153,7 +6153,7 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table)
 int Write_rows_log_event::do_after_row_operations(TABLE *table, int error)
 {
   if (error == 0)
-    error= table->file->end_bulk_insert();
+    error= table->file->ha_end_bulk_insert();
   return error;
 }
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index d8934b54b85..cfbf9680518 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -428,7 +428,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
       the code to make the call of end_bulk_insert() below safe.
     */
     if (lock_type != TL_WRITE_DELAYED && !thd->prelocked_mode)
-      table->file->start_bulk_insert(values_list.elements);
+      table->file->ha_start_bulk_insert(values_list.elements);
 
   thd->no_trans_update= 0;
   thd->abort_on_warning= (!ignore &&
@@ -553,7 +553,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
     else
 #endif
     {
-      if (!thd->prelocked_mode && table->file->end_bulk_insert() && !error)
+      if (!thd->prelocked_mode && table->file->ha_end_bulk_insert() && !error)
      {
        table->file->print_error(my_errno,MYF(0));
        error=1;
@@ -644,6 +644,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
     thd->row_count_func= info.copied+info.deleted+info.updated;
     ::send_ok(thd, (ulong) thd->row_count_func, id, buff);
   }
+  if (table != NULL)
+    table->file->release_auto_increment();
   thd->abort_on_warning= 0;
   DBUG_RETURN(FALSE);
 
@@ -652,6 +654,8 @@ abort:
   if (lock_type == TL_WRITE_DELAYED)
     end_delayed_insert(thd);
 #endif
+  if (table != NULL)
+    table->file->release_auto_increment();
   if (!joins_freed)
     free_underlaid_joins(thd, &thd->lex->select_lex);
   thd->abort_on_warning= 0;
@@ -972,7 +976,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
     uint key_nr;
     if (error != HA_WRITE_SKIP)
       goto err;
-    table->file->restore_auto_increment();
+    table->file->restore_auto_increment(); // it's too early here! BUG#20188
     if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
     {
       error=HA_WRITE_SKIP;			/* Database can't find key */
@@ -2249,7 +2253,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
       We won't start bulk inserts at all if this statement uses functions or
       should invoke triggers since they may access to the same table too.
     */
-    table->file->start_bulk_insert((ha_rows) 0);
+    table->file->ha_start_bulk_insert((ha_rows) 0);
   }
   restore_record(table,s->default_values);		// Get empty record
   table->next_number_field=table->found_next_number_field;
@@ -2300,7 +2304,7 @@ int select_insert::prepare2(void)
   DBUG_ENTER("select_insert::prepare2");
   if (thd->lex->current_select->options & OPTION_BUFFER_RESULT &&
       !thd->prelocked_mode)
-    table->file->start_bulk_insert((ha_rows) 0);
+    table->file->ha_start_bulk_insert((ha_rows) 0);
   DBUG_RETURN(0);
 }
 
@@ -2374,6 +2378,7 @@ bool select_insert::send_data(List<Item> &values)
       last_insert_id= thd->insert_id();
     }
   }
+  table->file->release_auto_increment();
   DBUG_RETURN(error);
 }
 
@@ -2403,7 +2408,7 @@ void select_insert::send_error(uint errcode,const char *err)
     DBUG_VOID_RETURN;
   }
   if (!thd->prelocked_mode)
-    table->file->end_bulk_insert();
+    table->file->ha_end_bulk_insert();
   /*
     If at least one row has been inserted/modified and will stay in the
     table (the table doesn't have transactions) we must write to the binlog (and
@@ -2459,7 +2464,7 @@ bool select_insert::send_eof()
   int error,error2;
   DBUG_ENTER("select_insert::send_eof");
 
-  error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0;
+  error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert():0;
   table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
 
   /*
@@ -2726,7 +2731,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
   if (info.ignore || info.handle_duplicates != DUP_ERROR)
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
   if (!thd->prelocked_mode)
-    table->file->start_bulk_insert((ha_rows) 0);
+    table->file->ha_start_bulk_insert((ha_rows) 0);
   thd->no_trans_update= 0;
   thd->abort_on_warning= (!info.ignore &&
                           (thd->variables.sql_mode &
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 5c5e56f9ecc..edae05bf891 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -117,7 +117,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
 {
   char name[FN_REFLEN];
   File file;
-  TABLE *table;
+  TABLE *table= NULL;
   int error;
   String *field_term=ex->field_term,*escaped=ex->escaped;
   String *enclosed=ex->enclosed;
@@ -367,7 +367,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
       handle_duplicates == DUP_REPLACE)
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
   if (!thd->prelocked_mode)
-    table->file->start_bulk_insert((ha_rows) 0);
+    table->file->ha_start_bulk_insert((ha_rows) 0);
   table->copy_blobs=1;
 
   thd->no_trans_update= 0;
@@ -384,7 +384,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
     error= read_sep_field(thd, info, table_list, fields_vars,
                           set_fields, set_values, read_info,
                           *enclosed, skip_lines, ignore);
-  if (!thd->prelocked_mode && table->file->end_bulk_insert() && !error)
+  if (!thd->prelocked_mode && table->file->ha_end_bulk_insert() && !error)
   {
     table->file->print_error(my_errno, MYF(0));
     error= 1;
@@ -506,6 +506,8 @@ err:
     mysql_unlock_tables(thd, thd->lock);
     thd->lock=0;
   }
+  if (table != NULL)
+    table->file->release_auto_increment();
   thd->abort_on_warning= 0;
   DBUG_RETURN(error);
 }
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 4918768a03f..89661ba90a5 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -9309,7 +9309,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
     all places where a corresponding end_bulk_insert() should be put.
   */
   table->file->info(HA_STATUS_VARIABLE); /* update table->file->records */
-  new_table.file->start_bulk_insert(table->file->records);
+  new_table.file->ha_start_bulk_insert(table->file->records);
 #else
   /* HA_EXTRA_WRITE_CACHE can stay until close, no need to disable it */
   new_table.file->extra(HA_EXTRA_WRITE_CACHE);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index f28a70c05bc..f39e4e77d70 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -6142,7 +6142,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
                                            MODE_STRICT_ALL_TABLES));
 
   from->file->info(HA_STATUS_VARIABLE);
-  to->file->start_bulk_insert(from->file->records);
+  to->file->ha_start_bulk_insert(from->file->records);
 
   save_sql_mode= thd->variables.sql_mode;
 
@@ -6262,7 +6262,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
   free_io_cache(from);
   delete [] copy;				// This is never 0
 
-  if (to->file->end_bulk_insert() && error <= 0)
+  if (to->file->ha_end_bulk_insert() && error <= 0)
   {
     to->file->print_error(my_errno,MYF(0));
     error=1;
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 39b887a8001..f41d8aaaa15 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -912,9 +912,13 @@ error:
 }
 
-ulonglong ha_archive::get_auto_increment()
+void ha_archive::get_auto_increment(ulonglong offset, ulonglong increment,
+                                    ulonglong nb_desired_values,
+                                    ulonglong *first_value,
+                                    ulonglong *nb_reserved_values)
 {
-  return share->auto_increment_value + 1;
+  *nb_reserved_values= 1;
+  *first_value= share->auto_increment_value + 1;
 }
 
 /* Initialized at each key walk (called multiple times unlike rnd_init()) */
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 4c53c5c4064..0314bf303bf 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -83,7 +83,10 @@ public:
   {
     return HA_ONLY_WHOLE_INDEX;
   }
-  ulonglong get_auto_increment();
+  virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+                                  ulonglong nb_desired_values,
+                                  ulonglong *first_value,
+                                  ulonglong *nb_reserved_values);
  uint max_supported_keys()          const { return 1; }
  uint max_supported_key_length()    const { return sizeof(ulonglong); }
  uint max_supported_key_part_length() const { return sizeof(ulonglong); }
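To make the reservation contract introduced above concrete, here is a minimal, self-contained sketch (not MySQL code; ToyEngine, its counter, and main() are invented for illustration) of an engine-side get_auto_increment() with the new signature, plus a caller that walks the reserved interval the way the handler.cc comment describes (values of the form offset + N * increment):

    // Toy model of the new get_auto_increment() contract; assumes nothing
    // about the real handler class, only the signature shape from this patch.
    #include <cstdio>
    #include <limits>

    typedef unsigned long long ulonglong;

    struct ToyEngine
    {
      ulonglong next_value;                       // engine-side counter

      // Reserve nb_desired_values values of the form offset + N * increment.
      void get_auto_increment(ulonglong offset, ulonglong increment,
                              ulonglong nb_desired_values,
                              ulonglong *first_value,
                              ulonglong *nb_reserved_values)
      {
        // Round the counter up to the first value matching offset + N * increment.
        ulonglong nr= next_value;
        if (nr <= offset)
          nr= offset;
        else
          nr= offset + ((nr - offset + increment - 1) / increment) * increment;
        *first_value= nr;
        // Hand out exactly the requested interval; a table-level-locking
        // engine could instead return ULONGLONG_MAX ("reserved to positive
        // infinite"), as several engines in this patch do.
        *nb_reserved_values= nb_desired_values;
        next_value= nr + nb_desired_values * increment;  // consume the interval
      }
    };

    int main()
    {
      ToyEngine engine= { 1 };
      ulonglong first, reserved;

      // One reservation per statement, then the caller steps through it,
      // mirroring how update_auto_increment() is meant to use the interval.
      engine.get_auto_increment(/*offset=*/5, /*increment=*/10,
                                /*nb_desired_values=*/3, &first, &reserved);
      for (ulonglong i= 0, nr= first; i < reserved; i++, nr+= 10)
        std::printf("row %llu gets auto_increment value %llu\n", i + 1, nr);
      return 0;
    }

Under those assumptions the run prints 5, 15, 25: one engine call covers the whole multi-row insert, which is the point of adding nb_desired_values and nb_reserved_values to the interface.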