summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorunknown <guilhem@mysql.com>2006-06-02 22:21:32 +0200
committerunknown <guilhem@mysql.com>2006-06-02 22:21:32 +0200
commit6e96a48d03379c108a5bcbcc6458ff18d661ffb4 (patch)
treec0559b109baec8406c269ec627d05701d90cd012 /sql
parentb89c4e23e5d03136c8b74449e5c0a8fb64f7b5b6 (diff)
downloadmariadb-git-6e96a48d03379c108a5bcbcc6458ff18d661ffb4.tar.gz
First push for WL#3146 "less locking in auto_increment". It is a 0-real-change patch.
New prototype for get_auto_increment() (but new arguments not yet used), to be able to reserve a finite interval of auto_increment values from cooperating engines. A hint on how many values to reserve is found in handler::estimation_rows_to_insert, filled by ha_start_bulk_insert(), new wrapper around start_bulk_insert(). NOTE: this patch changes nothing, for all engines. But it makes the API ready for those engines which will want to do reservation. More csets will come to complete WL#3146. sql/ha_berkeley.cc: update to new prototype of get_auto_increment sql/ha_berkeley.h: update to new prototype of get_auto_increment sql/ha_heap.cc: update to new prototype of get_auto_increment sql/ha_heap.h: update to new prototype of get_auto_increment sql/ha_innodb.cc: update to new prototype of get_auto_increment sql/ha_innodb.h: update to new prototype of get_auto_increment sql/ha_myisam.cc: update to new prototype of get_auto_increment sql/ha_myisam.h: update to new prototype of get_auto_increment sql/ha_ndbcluster.cc: update to new prototype of get_auto_increment sql/ha_ndbcluster.h: update to new prototype of get_auto_increment sql/ha_partition.cc: update to new prototype of get_auto_increment sql/ha_partition.h: update to new prototype of get_auto_increment sql/handler.cc: new prototype of get_auto_increment, comments, preparation for when the MySQL layer is capable of getting finite auto_increment intervals from cooperating engines. sql/handler.h: a wrapper around start_bulk_insert(): ha_start_bulk_insert(), which stores the argument (number of rows expected for insertion) into a member of handler: estimation_rows_to_insert. This member will soon be used to decide how many auto_increment values we want to reserve from cooperating engines. New prototype for get_auto_increment, preparing for cooperating engines to reserve finite auto_increment intervals. 
release_auto_increment() will be used by the MySQL layer to inform the engine that it has not used all of the interval (engine can reclaim it). sql/log.cc: note for the future sql/log_event.cc: name of wrapper sql/sql_insert.cc: name of wrapper. When done with inserting, return unused auto_inc values to engine. sql/sql_load.cc: name of wrapper. When done with inserting, return unused auto_inc values to engine. sql/sql_select.cc: name of wrapper sql/sql_table.cc: name of wrapper storage/archive/ha_archive.cc: update to new prototype of get_auto_increment. Archive reserves only one value (Archive's god - Brian - told me that's the truth). storage/archive/ha_archive.h: update to new prototype of get_auto_increment()
Diffstat (limited to 'sql')
-rw-r--r--sql/ha_berkeley.cc17
-rw-r--r--sql/ha_berkeley.h5
-rw-r--r--sql/ha_heap.cc9
-rw-r--r--sql/ha_heap.h5
-rw-r--r--sql/ha_innodb.cc25
-rw-r--r--sql/ha_innodb.h5
-rw-r--r--sql/ha_myisam.cc19
-rw-r--r--sql/ha_myisam.h5
-rw-r--r--sql/ha_ndbcluster.cc13
-rw-r--r--sql/ha_ndbcluster.h7
-rw-r--r--sql/ha_partition.cc55
-rw-r--r--sql/ha_partition.h6
-rw-r--r--sql/handler.cc119
-rw-r--r--sql/handler.h23
-rw-r--r--sql/log.cc5
-rw-r--r--sql/log_event.cc4
-rw-r--r--sql/sql_insert.cc21
-rw-r--r--sql/sql_load.cc8
-rw-r--r--sql/sql_select.cc2
-rw-r--r--sql/sql_table.cc4
20 files changed, 293 insertions, 64 deletions
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 473fb149871..dfdffcb70b5 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -2255,8 +2255,12 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key,
}
-ulonglong ha_berkeley::get_auto_increment()
+void ha_berkeley::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
{
+ /* Ideally in case of real error (not "empty table") nr should be ~ULL(0) */
ulonglong nr=1; // Default if error or new key
int error;
(void) ha_berkeley::extra(HA_EXTRA_KEYREAD);
@@ -2267,9 +2271,18 @@ ulonglong ha_berkeley::get_auto_increment()
if (!table_share->next_number_key_offset)
{ // Autoincrement at key-start
error=ha_berkeley::index_last(table->record[1]);
+ /* has taken read lock on page of max key so reserves to infinite */
+ *nb_reserved_values= ULONGLONG_MAX;
}
else
{
+ /*
+ MySQL needs to call us for next row: assume we are inserting ("a",null)
+ here, we return 3, and next this statement will want to insert ("b",null):
+ there is no reason why ("b",3+1) would be the good row to insert: maybe it
+ already exists, maybe 3+1 is too large...
+ */
+ *nb_reserved_values= 1;
DBT row,old_key;
bzero((char*) &row,sizeof(row));
KEY *key_info= &table->key_info[active_index];
@@ -2310,7 +2323,7 @@ ulonglong ha_berkeley::get_auto_increment()
table->next_number_field->val_int_offset(table_share->rec_buff_length)+1;
ha_berkeley::index_end();
(void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD);
- return nr;
+ *first_value= nr;
}
void ha_berkeley::print_error(int error, myf errflag)
diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h
index 21b618b8d6d..fee2352aaaa 100644
--- a/sql/ha_berkeley.h
+++ b/sql/ha_berkeley.h
@@ -149,7 +149,10 @@ class ha_berkeley: public handler
int5store(to,share->auto_ident);
pthread_mutex_unlock(&share->mutex);
}
- ulonglong get_auto_increment();
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
void print_error(int error, myf errflag);
uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; }
bool primary_key_is_clustered() { return true; }
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 1a7efb42748..669fcce075c 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -693,10 +693,15 @@ void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
create_info->auto_increment_value= auto_increment_value;
}
-ulonglong ha_heap::get_auto_increment()
+void ha_heap::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
{
ha_heap::info(HA_STATUS_AUTO);
- return auto_increment_value;
+ *first_value= auto_increment_value;
+ /* such table has only table-level locking so reserves up to +inf */
+ *nb_reserved_values= ULONGLONG_MAX;
}
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index 9b9b7f90d90..40d39bfad42 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -71,7 +71,10 @@ public:
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
- ulonglong get_auto_increment();
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int index_read_idx(byte * buf, uint idx, const byte * key,
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 0b2f561e8c9..9af39ce8b7a 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -6963,17 +6963,21 @@ func_exit_early:
return(error);
}
-/***********************************************************************
+/*******************************************************************************
This function initializes the auto-inc counter if it has not been
initialized yet. This function does not change the value of the auto-inc
counter if it already has been initialized. Returns the value of the
-auto-inc counter. */
+auto-inc counter in *first_value, and ULONGLONG_MAX in *nb_reserved_values (as
+we have a table-level lock). offset, increment, nb_desired_values are ignored.
+*first_value is set to -1 if error (deadlock or lock wait timeout) */
-ulonglong
-ha_innobase::get_auto_increment()
-/*=============================*/
- /* out: auto-increment column value, -1 if error
- (deadlock or lock wait timeout) */
+void ha_innobase::get_auto_increment(
+/*=================================*/
+ ulonglong offset, /* in */
+ ulonglong increment, /* in */
+ ulonglong nb_desired_values, /* in */
+ ulonglong *first_value, /* out */
+ ulonglong *nb_reserved_values) /* out */
{
longlong nr;
int error;
@@ -6988,10 +6992,13 @@ ha_innobase::get_auto_increment()
ut_print_timestamp(stderr);
sql_print_error("Error %lu in ::get_auto_increment()",
(ulong) error);
- return(~(ulonglong) 0);
+ *first_value= (~(ulonglong) 0);
+ return;
}
- return((ulonglong) nr);
+ *first_value= (ulonglong) nr;
+ /* table-level autoinc lock reserves up to +inf */
+ *nb_reserved_values= ULONGLONG_MAX;
}
/* See comment in handler.h */
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 4f0c9eb151b..2137323d1e9 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -181,7 +181,10 @@ class ha_innobase: public handler
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
void init_table_handle_for_HANDLER();
- ulonglong get_auto_increment();
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
int reset_auto_increment(ulonglong value);
virtual bool get_error_message(int error, String *buf);
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 40081c975c8..a3d032d0b91 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -1690,7 +1690,10 @@ int ha_myisam::rename_table(const char * from, const char * to)
}
-ulonglong ha_myisam::get_auto_increment()
+void ha_myisam::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
{
ulonglong nr;
int error;
@@ -1699,7 +1702,10 @@ ulonglong ha_myisam::get_auto_increment()
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
ha_myisam::info(HA_STATUS_AUTO);
- return auto_increment_value;
+ *first_value= auto_increment_value;
+ /* MyISAM has only table-level lock, so reserves to +inf */
+ *nb_reserved_values= ULONGLONG_MAX;
+ return;
}
/* it's safe to call the following if bulk_insert isn't on */
@@ -1720,7 +1726,14 @@ ulonglong ha_myisam::get_auto_increment()
val_int_offset(table->s->rec_buff_length)+1);
}
extra(HA_EXTRA_NO_KEYREAD);
- return nr;
+ *first_value= nr;
+ /*
+ MySQL needs to call us for next row: assume we are inserting ("a",null)
+ here, we return 3, and next this statement will want to insert ("b",null):
+ there is no reason why ("b",3+1) would be the good row to insert: maybe it
+ already exists, maybe 3+1 is too large...
+ */
+ *nb_reserved_values= 1;
}
diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h
index 86efed27478..c5e712d4256 100644
--- a/sql/ha_myisam.h
+++ b/sql/ha_myisam.h
@@ -113,7 +113,10 @@ class ha_myisam: public handler
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
- ulonglong get_auto_increment();
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
int rename_table(const char * from, const char * to);
int delete_table(const char *name);
int check(THD* thd, HA_CHECK_OPT* check_opt);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 3425c638005..58b56f891f2 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -5230,7 +5230,10 @@ int ha_ndbcluster::delete_table(const char *name)
}
-ulonglong ha_ndbcluster::get_auto_increment()
+void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
{
int cache_size;
Uint64 auto_value;
@@ -5264,9 +5267,13 @@ ulonglong ha_ndbcluster::get_auto_increment()
const NdbError err= ndb->getNdbError();
sql_print_error("Error %lu in ::get_auto_increment(): %s",
(ulong) err.code, err.message);
- DBUG_RETURN(~(ulonglong) 0);
+ *first_value= ~(ulonglong) 0;
+ DBUG_VOID_RETURN;
}
- DBUG_RETURN((longlong)auto_value);
+ *first_value= (longlong)auto_value;
+ /* From the point of view of MySQL, NDB reserves one row at a time */
+ *nb_reserved_values= 1;
+ DBUG_VOID_RETURN;
}
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index badca69941a..2acd83c7a1f 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -803,7 +803,10 @@ private:
int set_index_key(NdbOperation *, const KEY *key_info, const byte *key_ptr);
void print_results();
- ulonglong get_auto_increment();
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
int ndb_err(NdbTransaction*);
bool uses_blob_value();
@@ -865,7 +868,7 @@ private:
bool m_primary_key_update;
bool m_write_op;
bool m_ignore_no_key;
- ha_rows m_rows_to_insert;
+ ha_rows m_rows_to_insert; // TODO: merge it with handler::estimation_rows_to_insert?
ha_rows m_rows_inserted;
ha_rows m_bulk_insert_rows;
ha_rows m_rows_changed;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index af0556f1e6f..a1644998ff2 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2803,7 +2803,7 @@ void ha_partition::start_bulk_insert(ha_rows rows)
file= m_file;
do
{
- (*file)->start_bulk_insert(rows);
+ (*file)->ha_start_bulk_insert(rows);
} while (*(++file));
DBUG_VOID_RETURN;
}
@@ -2830,7 +2830,7 @@ int ha_partition::end_bulk_insert()
do
{
int tmp;
- if ((tmp= (*file)->end_bulk_insert()))
+ if ((tmp= (*file)->ha_end_bulk_insert()))
error= tmp;
} while (*(++file));
DBUG_RETURN(error);
@@ -4155,8 +4155,11 @@ void ha_partition::info(uint flag)
if (flag & HA_STATUS_AUTO)
{
+ ulonglong nb_reserved_values;
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
- auto_increment_value= get_auto_increment();
+ /* we don't want to reserve any values, it's pure information */
+ get_auto_increment(0, 0, 0, &auto_increment_value, &nb_reserved_values);
+ release_auto_increment();
}
if (flag & HA_STATUS_VARIABLE)
{
@@ -5302,19 +5305,55 @@ void ha_partition::restore_auto_increment()
partitions.
*/
-ulonglong ha_partition::get_auto_increment()
+void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
{
- ulonglong auto_inc, max_auto_inc= 0;
+ ulonglong first_value_part, last_value_part, nb_reserved_values_part,
+ last_value;
DBUG_ENTER("ha_partition::get_auto_increment");
+ *first_value= 0;
+ last_value= ULONGLONG_MAX;
for (uint i= 0; i < m_tot_parts; i++)
{
- auto_inc= m_file[i]->get_auto_increment();
- set_if_bigger(max_auto_inc, auto_inc);
+ m_file[i]->get_auto_increment(offset, increment, nb_desired_values,
+ &first_value_part, &nb_reserved_values_part);
+ if (first_value_part == ~(ulonglong)(0)) // error in one partition
+ {
+ *first_value= first_value_part;
+ break;
+ }
+ /*
+ Partition has reserved an interval. Intersect it with the intervals
+ already reserved for the previous partitions.
+ */
+ last_value_part= (nb_reserved_values_part == ULONGLONG_MAX) ?
+ ULONGLONG_MAX : (first_value_part + nb_reserved_values_part * increment);
+ set_if_bigger(*first_value, first_value_part);
+ set_if_smaller(last_value, last_value_part);
+ }
+ if (last_value < *first_value) /* empty intersection, error */
+ {
+ *first_value= ~(ulonglong)(0);
}
- DBUG_RETURN(max_auto_inc);
+ *nb_reserved_values= (last_value == ULONGLONG_MAX) ?
+ ULONGLONG_MAX : ((last_value - *first_value) / increment);
+
+ DBUG_VOID_RETURN;
}
+void ha_partition::release_auto_increment()
+{
+ DBUG_ENTER("ha_partition::release_auto_increment");
+
+ for (uint i= 0; i < m_tot_parts; i++)
+ {
+ m_file[i]->release_auto_increment();
+ }
+ DBUG_VOID_RETURN;
+}
/****************************************************************************
MODULE initialise handler for HANDLER call
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 1443a20133c..44c50e0af3f 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -815,7 +815,11 @@ public:
-------------------------------------------------------------------------
*/
virtual void restore_auto_increment();
- virtual ulonglong get_auto_increment();
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
+ virtual void release_auto_increment();
/*
-------------------------------------------------------------------------
diff --git a/sql/handler.cc b/sql/handler.cc
index f51e91f1882..7549315ffb6 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1639,7 +1639,7 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
Update the auto_increment field if necessary
SYNOPSIS
- update_auto_increment()
+ update_auto_increment()
RETURN
0 ok
@@ -1668,8 +1668,9 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
statement. For the following rows we generate new numbers based on the
last used number.
- - thd->next_insert_id != 0. This happens when we have read a statement
- from the binary log or when one has used SET LAST_INSERT_ID=#.
+ - thd->next_insert_id != 0. This happens when we have read an Intvar event
+ of type INSERT_ID_EVENT from the binary log or when one has used SET
+ INSERT_ID=#.
In this case we will set the column to the value of next_insert_id.
The next row will be given the id
@@ -1685,8 +1686,20 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
start counting from the inserted value.
thd->next_insert_id is cleared after it's been used for a statement.
+
+ TODO
+
+ Replace all references to "next number" or NEXT_NUMBER to
+ "auto_increment", everywhere (see below: there is
+ table->auto_increment_field_not_null, and there also exists
+ table->next_number_field, it's not consistent).
+
*/
+#define AUTO_INC_DEFAULT_NB_ROWS 1 // Some prefer 1024 here
+#define AUTO_INC_DEFAULT_NB_MAX_BITS 16
+#define AUTO_INC_DEFAULT_NB_MAX ((1 << AUTO_INC_DEFAULT_NB_MAX_BITS) - 1)
+
bool handler::update_auto_increment()
{
ulonglong nr;
@@ -1702,17 +1715,24 @@ bool handler::update_auto_increment()
*/
thd->prev_insert_id= thd->next_insert_id;
auto_increment_field_not_null= table->auto_increment_field_not_null;
- table->auto_increment_field_not_null= FALSE;
+ table->auto_increment_field_not_null= FALSE; // to reset for next row
if ((nr= table->next_number_field->val_int()) != 0 ||
auto_increment_field_not_null &&
thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO)
{
- /* Clear flag for next row */
- /* Mark that we didn't generate a new value **/
+ /*
+ The user did specify a value for the auto_inc column, we don't generate
+ a new value, write it down.
+ */
auto_increment_column_changed=0;
- /* Update next_insert_id if we have already generated a value */
+ /*
+ Update next_insert_id if we had already generated a value in this
+ statement (case of INSERT VALUES(null),(3763),(null):
+ the last NULL needs to insert 3764, not the value of the first NULL plus
+ 1).
+ */
if (thd->clear_next_insert_id && nr >= thd->next_insert_id)
{
if (variables->auto_increment_increment != 1)
@@ -1726,9 +1746,53 @@ bool handler::update_auto_increment()
}
if (!(nr= thd->next_insert_id))
{
- if ((nr= get_auto_increment()) == ~(ulonglong) 0)
+ ulonglong nb_desired_values= 1, nb_reserved_values;
+#ifdef TO_BE_ENABLED_SOON
+ /*
+ Reserved intervals will be stored in "THD::auto_inc_intervals".
+ handler::estimation_rows_to_insert will be the argument passed by
+ handler::ha_start_bulk_insert().
+ */
+ uint estimation_known= test(estimation_rows_to_insert > 0);
+ uint nb_already_reserved_intervals= thd->auto_inc_intervals.nb_elements();
+ /*
+ If an estimation was given to the engine:
+ - use it.
+ - if we already reserved numbers, it means the estimation was
+ not accurate, then we'll reserve 2*AUTO_INC_DEFAULT_NB_VALUES the 2nd
+ time, twice that the 3rd time etc.
+ If no estimation was given, use those increasing defaults from the
+ start, starting from AUTO_INC_DEFAULT_NB_VALUES.
+ Don't go beyond a max to not reserve "way too much" (because reservation
+ means potentially losing unused values).
+ */
+ if (nb_already_reserved_intervals == 0 && estimation_known)
+ nb_desired_values= estimation_rows_to_insert;
+ else /* go with the increasing defaults */
+ {
+ /* avoid overflow in formula, with this if() */
+ if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS)
+ {
+ nb_desired_values= AUTO_INC_DEFAULT_NB_VALUES *
+ (1 << nb_already_reserved_intervals);
+ set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX);
+ }
+ else
+ nb_desired_values= AUTO_INC_DEFAULT_NB_MAX;
+ }
+#endif
+ /* This call ignores all its parameters but nr, currently */
+ get_auto_increment(variables->auto_increment_offset,
+ variables->auto_increment_increment,
+ nb_desired_values, &nr,
+ &nb_reserved_values);
+ if (nr == ~(ulonglong) 0)
result= 1; // Mark failure
+ /*
+ That should not be needed when engines actually use offset and increment
+ above.
+ */
if (variables->auto_increment_increment != 1)
nr= next_insert_id(nr-1, variables);
/*
@@ -1786,7 +1850,29 @@ void handler::restore_auto_increment()
}
-ulonglong handler::get_auto_increment()
+/*
+ Reserves an interval of auto_increment values from the handler.
+
+ SYNOPSIS
+ get_auto_increment()
+ offset
+ increment
+ nb_desired_values how many values we want
+ first_value (OUT) the first value reserved by the handler
+ nb_reserved_values (OUT) how many values the handler reserved
+
+ offset and increment means that we want values to be of the form
+ offset + N * increment, where N>=0 is integer.
+ If the function sets *first_value to ~(ulonglong)0 it means an error.
+ If the function sets *nb_reserved_values to ULONGLONG_MAX it means it has
+ reserved to "positive infinite".
+
+*/
+
+void handler::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
{
ulonglong nr;
int error;
@@ -1796,6 +1882,12 @@ ulonglong handler::get_auto_increment()
if (!table->s->next_number_key_offset)
{ // Autoincrement at key-start
error=index_last(table->record[1]);
+ /*
+ MySQL implicitely assumes such method does locking (as MySQL decides to
+ use nr+increment without checking again with the handler, in
+ handler::update_auto_increment()), so reserves to infinite.
+ */
+ *nb_reserved_values= ULONGLONG_MAX;
}
else
{
@@ -1805,6 +1897,13 @@ ulonglong handler::get_auto_increment()
table->s->next_number_key_offset);
error= index_read(table->record[1], key, table->s->next_number_key_offset,
HA_READ_PREFIX_LAST);
+ /*
+ MySQL needs to call us for next row: assume we are inserting ("a",null)
+ here, we return 3, and next this statement will want to insert
+ ("b",null): there is no reason why ("b",3+1) would be the good row to
+ insert: maybe it already exists, maybe 3+1 is too large...
+ */
+ *nb_reserved_values= 1;
}
if (error)
@@ -1814,7 +1913,7 @@ ulonglong handler::get_auto_increment()
val_int_offset(table->s->rec_buff_length)+1);
index_end();
(void) extra(HA_EXTRA_NO_KEYREAD);
- return nr;
+ *first_value= nr;
}
diff --git a/sql/handler.h b/sql/handler.h
index 9a5a3b04823..ade26950c9a 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -777,6 +777,9 @@ class handler :public Sql_alloc
private:
virtual int reset() { return extra(HA_EXTRA_RESET); }
+ ha_rows estimation_rows_to_insert;
+ virtual void start_bulk_insert(ha_rows rows) {}
+ virtual int end_bulk_insert() {return 0; }
public:
const handlerton *ht; /* storage engine of this handler */
byte *ref; /* Pointer to current row */
@@ -821,7 +824,7 @@ public:
MY_BITMAP *write_set;
handler(const handlerton *ht_arg, TABLE_SHARE *share_arg)
- :table_share(share_arg), ht(ht_arg),
+ :table_share(share_arg), estimation_rows_to_insert(0), ht(ht_arg),
ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0),
delete_length(0), auto_increment_value(0),
records(0), deleted(0), mean_rec_length(0),
@@ -1242,7 +1245,11 @@ public:
*/
virtual int delete_all_rows()
{ return (my_errno=HA_ERR_WRONG_COMMAND); }
- virtual ulonglong get_auto_increment();
+ virtual void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values);
+ virtual void release_auto_increment() { return; };
virtual void restore_auto_increment();
/*
@@ -1303,8 +1310,16 @@ public:
virtual int disable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int enable_indexes(uint mode) { return HA_ERR_WRONG_COMMAND; }
virtual int indexes_are_disabled(void) {return 0;}
- virtual void start_bulk_insert(ha_rows rows) {}
- virtual int end_bulk_insert() {return 0; }
+ void ha_start_bulk_insert(ha_rows rows)
+ {
+ estimation_rows_to_insert= rows;
+ start_bulk_insert(rows);
+ }
+ int ha_end_bulk_insert()
+ {
+ estimation_rows_to_insert= 0;
+ return end_bulk_insert();
+ }
virtual int discard_or_import_tablespace(my_bool discard)
{return HA_ERR_WRONG_COMMAND;}
virtual int net_read_dump(NET* net) { return HA_ERR_WRONG_COMMAND; }
diff --git a/sql/log.cc b/sql/log.cc
index 31133a71757..9f7fbb44ef6 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2946,6 +2946,11 @@ bool MYSQL_LOG::write(Log_event *event_info)
}
if (thd->insert_id_used)
{
+ /*
+ If the auto_increment was second in a table's index (possible with
+ MyISAM or BDB) (table->next_number_key_offset != 0), such event is
+ in fact not necessary. We could avoid logging it.
+ */
Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id);
if (e.write(file))
goto err;
diff --git a/sql/log_event.cc b/sql/log_event.cc
index ab9fa2975a1..b74cd7128cd 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -6142,7 +6142,7 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table)
how many rows are going to be inserted, then it can allocate needed memory
from the start.
*/
- table->file->start_bulk_insert(0);
+ table->file->ha_start_bulk_insert(0);
/*
We need TIMESTAMP_NO_AUTO_SET otherwise ha_write_row() will not use fill
any TIMESTAMP column with data from the row but instead will use
@@ -6165,7 +6165,7 @@ int Write_rows_log_event::do_before_row_operations(TABLE *table)
int Write_rows_log_event::do_after_row_operations(TABLE *table, int error)
{
if (error == 0)
- error= table->file->end_bulk_insert();
+ error= table->file->ha_end_bulk_insert();
return error;
}
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 84760e93d8e..0b89f2bf0be 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -428,7 +428,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
the code to make the call of end_bulk_insert() below safe.
*/
if (lock_type != TL_WRITE_DELAYED && !thd->prelocked_mode)
- table->file->start_bulk_insert(values_list.elements);
+ table->file->ha_start_bulk_insert(values_list.elements);
thd->no_trans_update= 0;
thd->abort_on_warning= (!ignore &&
@@ -553,7 +553,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
else
#endif
{
- if (!thd->prelocked_mode && table->file->end_bulk_insert() && !error)
+ if (!thd->prelocked_mode && table->file->ha_end_bulk_insert() && !error)
{
table->file->print_error(my_errno,MYF(0));
error=1;
@@ -644,6 +644,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
thd->row_count_func= info.copied+info.deleted+info.updated;
::send_ok(thd, (ulong) thd->row_count_func, id, buff);
}
+ if (table != NULL)
+ table->file->release_auto_increment();
thd->abort_on_warning= 0;
DBUG_RETURN(FALSE);
@@ -652,6 +654,8 @@ abort:
if (lock_type == TL_WRITE_DELAYED)
end_delayed_insert(thd);
#endif
+ if (table != NULL)
+ table->file->release_auto_increment();
if (!joins_freed)
free_underlaid_joins(thd, &thd->lex->select_lex);
thd->abort_on_warning= 0;
@@ -971,7 +975,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
uint key_nr;
if (error != HA_WRITE_SKIP)
goto err;
- table->file->restore_auto_increment();
+ table->file->restore_auto_increment(); // it's too early here! BUG#20188
if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
{
error=HA_WRITE_SKIP; /* Database can't find key */
@@ -2248,7 +2252,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
We won't start bulk inserts at all if this statement uses functions or
should invoke triggers since they may access to the same table too.
*/
- table->file->start_bulk_insert((ha_rows) 0);
+ table->file->ha_start_bulk_insert((ha_rows) 0);
}
restore_record(table,s->default_values); // Get empty record
table->next_number_field=table->found_next_number_field;
@@ -2299,7 +2303,7 @@ int select_insert::prepare2(void)
DBUG_ENTER("select_insert::prepare2");
if (thd->lex->current_select->options & OPTION_BUFFER_RESULT &&
!thd->prelocked_mode)
- table->file->start_bulk_insert((ha_rows) 0);
+ table->file->ha_start_bulk_insert((ha_rows) 0);
DBUG_RETURN(0);
}
@@ -2373,6 +2377,7 @@ bool select_insert::send_data(List<Item> &values)
last_insert_id= thd->insert_id();
}
}
+ table->file->release_auto_increment();
DBUG_RETURN(error);
}
@@ -2402,7 +2407,7 @@ void select_insert::send_error(uint errcode,const char *err)
DBUG_VOID_RETURN;
}
if (!thd->prelocked_mode)
- table->file->end_bulk_insert();
+ table->file->ha_end_bulk_insert();
/*
If at least one row has been inserted/modified and will stay in the table
(the table doesn't have transactions) we must write to the binlog (and
@@ -2458,7 +2463,7 @@ bool select_insert::send_eof()
int error,error2;
DBUG_ENTER("select_insert::send_eof");
- error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0;
+ error= (!thd->prelocked_mode) ? table->file->ha_end_bulk_insert():0;
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
/*
@@ -2724,7 +2729,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
if (info.ignore || info.handle_duplicates != DUP_ERROR)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (!thd->prelocked_mode)
- table->file->start_bulk_insert((ha_rows) 0);
+ table->file->ha_start_bulk_insert((ha_rows) 0);
thd->no_trans_update= 0;
thd->abort_on_warning= (!info.ignore &&
(thd->variables.sql_mode &
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index bf8a6b8cfbe..445b0262b35 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -117,7 +117,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
{
char name[FN_REFLEN];
File file;
- TABLE *table;
+ TABLE *table= NULL;
int error;
String *field_term=ex->field_term,*escaped=ex->escaped;
String *enclosed=ex->enclosed;
@@ -366,7 +366,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
if (!thd->prelocked_mode)
- table->file->start_bulk_insert((ha_rows) 0);
+ table->file->ha_start_bulk_insert((ha_rows) 0);
table->copy_blobs=1;
thd->no_trans_update= 0;
@@ -383,7 +383,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
error= read_sep_field(thd, info, table_list, fields_vars,
set_fields, set_values, read_info,
*enclosed, skip_lines, ignore);
- if (!thd->prelocked_mode && table->file->end_bulk_insert() && !error)
+ if (!thd->prelocked_mode && table->file->ha_end_bulk_insert() && !error)
{
table->file->print_error(my_errno, MYF(0));
error= 1;
@@ -505,6 +505,8 @@ err:
mysql_unlock_tables(thd, thd->lock);
thd->lock=0;
}
+ if (table != NULL)
+ table->file->release_auto_increment();
thd->abort_on_warning= 0;
DBUG_RETURN(error);
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 2212371db92..4d0e5c39bb5 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -9298,7 +9298,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
all places where a corresponding end_bulk_insert() should be put.
*/
table->file->info(HA_STATUS_VARIABLE); /* update table->file->records */
- new_table.file->start_bulk_insert(table->file->records);
+ new_table.file->ha_start_bulk_insert(table->file->records);
#else
/* HA_EXTRA_WRITE_CACHE can stay until close, no need to disable it */
new_table.file->extra(HA_EXTRA_WRITE_CACHE);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 289051c2dd1..f9420ca1f70 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -6134,7 +6134,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
MODE_STRICT_ALL_TABLES));
from->file->info(HA_STATUS_VARIABLE);
- to->file->start_bulk_insert(from->file->records);
+ to->file->ha_start_bulk_insert(from->file->records);
save_sql_mode= thd->variables.sql_mode;
@@ -6254,7 +6254,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
free_io_cache(from);
delete [] copy; // This is never 0
- if (to->file->end_bulk_insert() && error <= 0)
+ if (to->file->ha_end_bulk_insert() && error <= 0)
{
to->file->print_error(my_errno,MYF(0));
error=1;