path: root/sql/ha_berkeley.cc
author     unknown <guilhem@mysql.com>  2006-06-02 22:21:32 +0200
committer  unknown <guilhem@mysql.com>  2006-06-02 22:21:32 +0200
commit     e63f3779d4b19acddbc561e989293b7b4f0559d4 (patch)
tree       c0559b109baec8406c269ec627d05701d90cd012  /sql/ha_berkeley.cc
parent     772758b6ecdec2551910a5c1f0cec5cee99315ec (diff)
download   mariadb-git-e63f3779d4b19acddbc561e989293b7b4f0559d4.tar.gz
First push for WL#3146 "less locking in auto_increment". It is a 0-real-change patch.
New prototype for get_auto_increment() (but new arguments not yet used), to be able to reserve a finite interval of auto_increment values from cooperating engines. A hint on how many values to reserve is found in handler::estimation_rows_to_insert, filled by ha_start_bulk_insert(), a new wrapper around start_bulk_insert(). NOTE: this patch changes nothing for any engine, but it makes the API ready for those engines which will want to do reservation. More csets will come to complete WL#3146.

sql/ha_berkeley.cc:
  update to new prototype of get_auto_increment
sql/ha_berkeley.h:
  update to new prototype of get_auto_increment
sql/ha_heap.cc:
  update to new prototype of get_auto_increment
sql/ha_heap.h:
  update to new prototype of get_auto_increment
sql/ha_innodb.cc:
  update to new prototype of get_auto_increment
sql/ha_innodb.h:
  update to new prototype of get_auto_increment
sql/ha_myisam.cc:
  update to new prototype of get_auto_increment
sql/ha_myisam.h:
  update to new prototype of get_auto_increment
sql/ha_ndbcluster.cc:
  update to new prototype of get_auto_increment
sql/ha_ndbcluster.h:
  update to new prototype of get_auto_increment
sql/ha_partition.cc:
  update to new prototype of get_auto_increment
sql/ha_partition.h:
  update to new prototype of get_auto_increment
sql/handler.cc:
  new prototype of get_auto_increment, comments, preparation for when the MySQL layer is capable of getting finite auto_increment intervals from cooperating engines.
sql/handler.h:
  a wrapper around start_bulk_insert(), ha_start_bulk_insert(), which stores its argument (number of rows expected for insertion) into a member of handler: estimation_rows_to_insert. This member will soon be used to decide how many auto_increment values to reserve from cooperating engines. New prototype for get_auto_increment(), preparing for cooperating engines to reserve finite auto_increment intervals. release_auto_increment() will be used by the MySQL layer to inform the engine that it has not used all of the interval (the engine can reclaim it).
sql/log.cc:
  note for the future
sql/log_event.cc:
  name of wrapper
sql/sql_insert.cc:
  name of wrapper. When done with inserting, return unused auto_inc values to the engine.
sql/sql_load.cc:
  name of wrapper. When done with inserting, return unused auto_inc values to the engine.
sql/sql_select.cc:
  name of wrapper
sql/sql_table.cc:
  name of wrapper
storage/archive/ha_archive.cc:
  update to new prototype of get_auto_increment. Archive reserves only one value (Archive's god - Brian - told me that's the truth).
storage/archive/ha_archive.h:
  update to new prototype of get_auto_increment()
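For orientation, here is a minimal sketch of the handler-level API shape described above, reduced to the members named in the commit message (estimation_rows_to_insert, ha_start_bulk_insert(), get_auto_increment(), release_auto_increment()). The typedefs and the class name handler_sketch are simplifications, and the method bodies are illustrative placeholders, not the real sql/handler.h code:

typedef unsigned long long ulonglong;  /* stand-ins for MySQL's global */
typedef unsigned long long ha_rows;    /* type aliases, simplified      */

class handler_sketch
{
public:
  /*
    Hint filled by ha_start_bulk_insert(): how many rows the statement
    expects to insert; engines can use it to size their reservation.
  */
  ulonglong estimation_rows_to_insert;

  handler_sketch() : estimation_rows_to_insert(0) {}
  virtual ~handler_sketch() {}

  /* Wrapper around start_bulk_insert() which records the estimation. */
  void ha_start_bulk_insert(ha_rows rows)
  {
    estimation_rows_to_insert= rows;
    start_bulk_insert(rows);
  }

  /*
    New prototype: instead of returning a single value, the engine hands back
    the first value of a reserved interval plus how many values it reserved.
  */
  virtual void get_auto_increment(ulonglong offset, ulonglong increment,
                                  ulonglong nb_desired_values,
                                  ulonglong *first_value,
                                  ulonglong *nb_reserved_values)
  {
    /*
      Placeholder: reserve a single value; a real engine would read its
      index or internal counter here (see the BerkeleyDB diff below).
    */
    *first_value= 1;
    *nb_reserved_values= 1;
  }

  /*
    Called by the MySQL layer when it did not consume the whole interval,
    so the engine can reclaim the unused part.
  */
  virtual void release_auto_increment() {}

protected:
  virtual void start_bulk_insert(ha_rows rows) {}
};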
Diffstat (limited to 'sql/ha_berkeley.cc')
-rw-r--r--  sql/ha_berkeley.cc  17
1 files changed, 15 insertions, 2 deletions
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 473fb149871..dfdffcb70b5 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -2255,8 +2255,12 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key,
 }
 
 
-ulonglong ha_berkeley::get_auto_increment()
+void ha_berkeley::get_auto_increment(ulonglong offset, ulonglong increment,
+                                     ulonglong nb_desired_values,
+                                     ulonglong *first_value,
+                                     ulonglong *nb_reserved_values)
 {
+  /* Ideally in case of real error (not "empty table") nr should be ~ULL(0) */
   ulonglong nr=1;                               // Default if error or new key
   int error;
   (void) ha_berkeley::extra(HA_EXTRA_KEYREAD);
@@ -2267,9 +2271,18 @@ ulonglong ha_berkeley::get_auto_increment()
   if (!table_share->next_number_key_offset)
   {                                             // Autoincrement at key-start
     error=ha_berkeley::index_last(table->record[1]);
+    /* has taken read lock on page of max key so reserves to infinite */
+    *nb_reserved_values= ULONGLONG_MAX;
   }
   else
   {
+    /*
+      MySQL needs to call us for next row: assume we are inserting ("a",null)
+      here, we return 3, and next this statement will want to insert ("b",null):
+      there is no reason why ("b",3+1) would be the good row to insert: maybe it
+      already exists, maybe 3+1 is too large...
+    */
+    *nb_reserved_values= 1;
     DBT row,old_key;
     bzero((char*) &row,sizeof(row));
     KEY *key_info= &table->key_info[active_index];
@@ -2310,7 +2323,7 @@ ulonglong ha_berkeley::get_auto_increment()
       table->next_number_field->val_int_offset(table_share->rec_buff_length)+1;
   ha_berkeley::index_end();
   (void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD);
-  return nr;
+  *first_value= nr;
 }
 
 void ha_berkeley::print_error(int error, myf errflag)
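To illustrate the caller side that the commit message describes (the MySQL layer asking for an interval and handing back the unused part when the statement finishes, as in sql_insert.cc and sql_load.cc), here is a hypothetical flow against the handler_sketch class above. The function name bulk_insert_example and the literal offset/increment values of 1 are assumptions for the example, not the actual server code:

void bulk_insert_example(handler_sketch *file, ulonglong rows_to_insert)
{
  /* Give the engine a hint so it can size its auto_increment reservation. */
  file->ha_start_bulk_insert(rows_to_insert);

  ulonglong first_value= 0, nb_reserved= 0;
  /* offset=1, increment=1 correspond to the default auto_increment settings. */
  file->get_auto_increment(1, 1, rows_to_insert, &first_value, &nb_reserved);

  /* ... insert rows, consuming first_value .. first_value + nb_reserved - 1 ... */

  /* Statement done: let the engine reclaim any values that were not used. */
  file->release_auto_increment();
}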