author    unknown <monty@donna.mysql.com>  2000-12-10 21:12:12 +0200
committer unknown <monty@donna.mysql.com>  2000-12-10 21:12:12 +0200
commit    71c02e764679e243ce4092b9d43ef1f7e81c42ef (patch)
tree      f8973f362589c9ab0e0f7ab7557e4976b23351b4
parent    8beb43501be6bb4a4bd09e2b0aadc815dcc2f606 (diff)
parent    b6f230879526d97e8fb019013d732d50b985296b (diff)
Merge work:/my/mysql into donna.mysql.com:/home/my/bk/mysql
Docs/manual.texi: Auto merged
-rw-r--r--  Docs/manual.texi    | 21
-rw-r--r--  sql/ha_berkeley.cc  | 49
-rw-r--r--  sql/sql_table.cc    | 62
-rw-r--r--  sql/sql_update.cc   | 11
4 files changed, 98 insertions(+), 45 deletions(-)
diff --git a/Docs/manual.texi b/Docs/manual.texi
index b0a60772b98..f6abd3f9159 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -17897,6 +17897,11 @@ tables as one. This only works with MERGE tables. @xref{MERGE}.
For the moment you need to have @code{SELECT}, @code{UPDATE}, and
@code{DELETE} privileges on the tables you map to a @code{MERGE} table.
All mapped tables must be in the same database as the @code{MERGE} table.
+@item
+In the created table the @code{PRIMARY} key will be placed first, followed
+by all @code{UNIQUE} keys and then the normal keys. This helps the
+@strong{MySQL} optimizer to prioritize which key to use and also more quickly
+detect duplicated @code{UNIQUE} keys.
@end itemize
@cindex silent column changes
@@ -22598,7 +22603,7 @@ You may also want to change @code{binlog_cache_size} and
@itemize @bullet
@item
@strong{MySQL} requires a @code{PRIMARY KEY} in each BDB table to be
-able to refer to previously read rows. If you don't create on,
+able to refer to previously read rows. If you don't create one,
@strong{MySQL} will create an maintain a hidden @code{PRIMARY KEY} for
you. The hidden key has a length of 5 bytes and is incremented for each
insert attempt.
@@ -22618,8 +22623,6 @@ you don't use @code{LOCK TABLE}, @strong{MYSQL} will issue an internal
multiple-write lock on the table to ensure that the table will be
properly locked if another thread issues a table lock.
@item
-@code{ALTER TABLE} doesn't yet work on @code{BDB} tables.
-@item
Internal locking in @code{BDB} tables is done on page level.
@item
@code{SELECT COUNT(*) FROM table_name} is slow as @code{BDB} tables doesn't
@@ -22637,8 +22640,8 @@ tables. In other words, the key information will take a little more
space in @code{BDB} tables compared to MyISAM tables which don't use
@code{PACK_KEYS=0}.
@item
-There is often holes in the BDB table to allow you to insert new rows
-between different keys. This makes BDB tables somewhat larger than
+There is often holes in the BDB table to allow you to insert new rows in
+the middle of the key tree. This makes BDB tables somewhat larger than
MyISAM tables.
@item
@strong{MySQL} performs a checkpoint each time a new Berkeley DB log
@@ -39762,6 +39765,14 @@ though, so Version 3.23 is not released as a stable version yet.
@appendixsubsec Changes in release 3.23.29
@itemize @bullet
@item
+When creating a table, put @code{PRIMARY} keys first, followed by
+@code{UNIQUE} keys.
+@item
+Fixed a bug in @code{UPDATE} involving multi-part keys where one
+specified all key parts both in the update and the @code{WHERE} part. In
+this case @strong{MySQL} could try to update a record that didn't match
+the whole @code{WHERE} part.
+@item
Changed drop table to first drop the tables and then the @code{.frm} file.
@item
Fixed a bug in the hostname cache which caused @code{mysqld} to report the
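
The BDB notes in the manual hunk above say only that the hidden PRIMARY KEY
is 5 bytes long and is incremented for each insert attempt. Below is a
minimal standalone sketch of such a counter-backed key, assuming a
big-endian byte layout so that bytewise comparison matches numeric order
(the layout is an assumption; the manual text above does not specify it):

// Illustrative sketch only, not the server's code. It mimics the documented
// behaviour: a hidden 5-byte key, incremented on every insert attempt,
// packed big-endian so that memcmp() order equals numeric order (which is
// what a B-tree key comparison needs).
#include <cstdint>
#include <cstdio>
#include <cstring>

struct HiddenKey {
  unsigned char bytes[5];                 // 5-byte key, as described above
};

static std::uint64_t hidden_counter = 0;  // hypothetical per-table counter

HiddenKey next_hidden_key() {
  std::uint64_t v = ++hidden_counter;     // one step per insert attempt
  HiddenKey k;
  for (int i = 4; i >= 0; --i) {          // pack low byte last: big-endian
    k.bytes[i] = (unsigned char)(v & 0xff);
    v >>= 8;
  }
  return k;
}

int main() {
  HiddenKey a = next_hidden_key();
  HiddenKey b = next_hidden_key();
  // Big-endian packing keeps byte order equal to numeric order.
  std::printf("a < b: %d\n", std::memcmp(a.bytes, b.bytes, 5) < 0);
  return 0;
}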
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index fc293c1e8af..45a638c8424 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -1660,7 +1660,8 @@ longlong ha_berkeley::get_auto_increment()
}
else
{
- DBT row;
+ DBT row,old_key;
+ DBC *auto_cursor;
bzero((char*) &row,sizeof(row));
uint key_len;
KEY *key_info= &table->key_info[active_index];
@@ -1670,27 +1671,37 @@ longlong ha_berkeley::get_auto_increment()
key_buff, table->record[0],
table->next_number_key_offset);
/* Store for compare */
- memcpy(key_buff2, key_buff, (key_len=last_key.size));
- /* Modify the compare so that we will find the next key */
- key_info->handler.bdb_return_if_eq= 1;
- /* We lock the next key as the new key will probl. be on the same page */
- error=cursor->c_get(cursor, &last_key, &row, DB_SET_RANGE | DB_RMW),
- key_info->handler.bdb_return_if_eq= 0;
-
- if (!error || error == DB_NOTFOUND)
+ memcpy(old_key.data=key_buff2, key_buff, (old_key.size=last_key.size));
+ error=1;
+ if (!(file->cursor(key_file[active_index], transaction, &auto_cursor, 0)))
{
- /*
- Now search go one step back and then we should have found the
- biggest key with the given prefix
- */
- if (read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV | DB_RMW),
- table->record[1], active_index, &row, (DBT*) 0, 0) ||
- berkeley_key_cmp(table, key_info, key_buff2, key_len))
- error=1; // Something went wrong or no such key
+ /* Modify the compare so that we will find the next key */
+ key_info->handler.bdb_return_if_eq= 1;
+ /* We lock the next key as the new key will probl. be on the same page */
+ error=auto_cursor->c_get(auto_cursor, &last_key, &row,
+ DB_SET_RANGE | DB_RMW);
+ key_info->handler.bdb_return_if_eq= 0;
+ if (!error || error == DB_NOTFOUND)
+ {
+ /*
+ Now search go one step back and then we should have found the
+ biggest key with the given prefix
+ */
+ error=1;
+ if (!auto_cursor->c_get(auto_cursor, &last_key, &row, DB_PREV | DB_RMW)
+ && !berkeley_cmp_packed_key(key_file[active_index], &old_key,
+ &last_key))
+ {
+ error=0; // Found value
+ unpack_key(table->record[1], &last_key, active_index);
+ }
+ }
+ auto_cursor->c_close(auto_cursor);
}
}
- nr=(longlong)
- table->next_number_field->val_int_offset(table->rec_buff_length)+1;
+ if (!error)
+ nr=(longlong)
+ table->next_number_field->val_int_offset(table->rec_buff_length)+1;
ha_berkeley::index_end();
(void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD);
return nr;
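
For context, the rewritten get_auto_increment() above opens its own cursor,
seeks with DB_SET_RANGE | DB_RMW to the first entry past the key prefix
(the bdb_return_if_eq tweak makes the seek treat "equal" as "greater"), and
then steps back one entry with DB_PREV so it lands on the biggest key with
that prefix. Here is a hedged standalone sketch of the same cursor pattern
against the classic Berkeley DB C API used in this tree; the function name,
the MAX_KEY_LEN bound, and the 0xff padding (standing in for the
comparison-callback tweak) are all illustrative assumptions:

#include <db.h>
#include <errno.h>
#include <string.h>

enum { MAX_KEY_LEN = 128 };  /* assumed upper bound on packed key length */

/* Find the largest key starting with `prefix`. On success returns 0 and
   copies the key into `out`. Padding the seek key with 0xff approximates
   "first key strictly past the prefix"; it misses only keys that are
   all-0xff after the prefix. Error handling is abbreviated. */
int find_last_with_prefix(DB *db, DB_TXN *txn,
                          const void *prefix, u_int32_t prefix_len,
                          void *out, u_int32_t out_size, u_int32_t *out_len)
{
  DBC *cursor;
  DBT key, data;
  unsigned char seek_buf[MAX_KEY_LEN];
  int ret;

  if (prefix_len > sizeof(seek_buf))
    return EINVAL;
  memset(&key, 0, sizeof(key));
  memset(&data, 0, sizeof(data));
  memcpy(seek_buf, prefix, prefix_len);
  memset(seek_buf + prefix_len, 0xff, sizeof(seek_buf) - prefix_len);
  key.data = seek_buf;
  key.size = sizeof(seek_buf);

  if ((ret = db->cursor(db, txn, &cursor, 0)))
    return ret;

  /* DB_RMW write-locks the page, as the new key will probably land on the
     same page (same reasoning as the comment in the diff above). */
  ret = cursor->c_get(cursor, &key, &data, DB_SET_RANGE | DB_RMW);
  if (ret == 0 || ret == DB_NOTFOUND)
  {
    /* Step back one entry. In the DB_NOTFOUND case the cursor is still
       uninitialized, so DB_PREV acts like DB_LAST, which is exactly what
       the original code relies on. */
    ret = cursor->c_get(cursor, &key, &data, DB_PREV | DB_RMW);
    if (ret == 0 && key.size >= prefix_len && key.size <= out_size &&
        memcmp(key.data, prefix, prefix_len) == 0)
    {
      memcpy(out, key.data, key.size);  /* copy before closing the cursor */
      *out_len = key.size;
    }
    else if (ret == 0)
      ret = DB_NOTFOUND;                /* no key carries this prefix */
  }
  cursor->c_close(cursor);
  return ret;
}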
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 78b202e538c..778a60859b2 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -327,18 +327,28 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
}
/* Create keys */
+
List_iterator<Key> key_iterator(keys);
uint key_parts=0,key_count=keys.elements;
- bool primary_key=0,unique_key=0;
+ List<Key> keys_in_order; // Add new keys here
+ Key *primary_key=0;
+ bool unique_key=0;
Key *key;
uint tmp;
tmp=min(file->max_keys(), MAX_KEY);
-
if (key_count > tmp)
{
my_error(ER_TOO_MANY_KEYS,MYF(0),tmp);
DBUG_RETURN(-1);
}
+
+ /*
+ Check keys;
+ Put PRIMARY KEY first, then UNIQUE keys and other keys last
+ This will make checking for duplicated keys faster and ensure that
+ primary keys are prioritized.
+ */
+
while ((key=key_iterator++))
{
tmp=max(file->max_key_parts(),MAX_REF_PARTS);
@@ -353,17 +363,6 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
DBUG_RETURN(-1);
}
key_parts+=key->columns.elements;
- }
- key_info_buffer=key_info=(KEY*) sql_calloc(sizeof(KEY)*key_count);
- key_part_info=(KEY_PART_INFO*) sql_calloc(sizeof(KEY_PART_INFO)*key_parts);
- if (!key_info_buffer || ! key_part_info)
- DBUG_RETURN(-1); // Out of memory
-
- key_iterator.rewind();
- for (; (key=key_iterator++) ; key_info++)
- {
- uint key_length=0;
- key_part_spec *column;
if (key->type == Key::PRIMARY)
{
if (primary_key)
@@ -371,10 +370,39 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
my_error(ER_MULTIPLE_PRI_KEY,MYF(0));
DBUG_RETURN(-1);
}
- primary_key=1;
+ primary_key=key;
}
else if (key->type == Key::UNIQUE)
+ {
unique_key=1;
+ if (keys_in_order.push_front(key))
+ DBUG_RETURN(-1);
+ }
+ else if (keys_in_order.push_back(key))
+ DBUG_RETURN(-1);
+ }
+ if (primary_key)
+ {
+ if (keys_in_order.push_front(primary_key))
+ DBUG_RETURN(-1);
+ }
+ else if (!unique_key && (file->option_flag() & HA_REQUIRE_PRIMARY_KEY))
+ {
+ my_error(ER_REQUIRES_PRIMARY_KEY,MYF(0));
+ DBUG_RETURN(-1);
+ }
+
+ key_info_buffer=key_info=(KEY*) sql_calloc(sizeof(KEY)*key_count);
+ key_part_info=(KEY_PART_INFO*) sql_calloc(sizeof(KEY_PART_INFO)*key_parts);
+ if (!key_info_buffer || ! key_part_info)
+ DBUG_RETURN(-1); // Out of memory
+
+ List_iterator<Key> key_iterator_in_order(keys_in_order);
+ for (; (key=key_iterator_in_order++) ; key_info++)
+ {
+ uint key_length=0;
+ key_part_spec *column;
+
key_info->flags= (key->type == Key::MULTIPLE) ? 0 :
(key->type == Key::FULLTEXT) ? HA_FULLTEXT : HA_NOSAME;
key_info->key_parts=(uint8) key->columns.elements;
@@ -508,12 +536,6 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
my_error(ER_WRONG_AUTO_KEY,MYF(0));
DBUG_RETURN(-1);
}
- if (!primary_key && !unique_key &&
- (file->option_flag() & HA_REQUIRE_PRIMARY_KEY))
- {
- my_error(ER_REQUIRES_PRIMARY_KEY,MYF(0));
- DBUG_RETURN(-1);
- }
/* Check if table exists */
if (create_info->options & HA_LEX_CREATE_TMP_TABLE)
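
To make the reordering above easier to follow: UNIQUE keys are pushed to the
front of keys_in_order, other keys to the back, and the single PRIMARY key
(if any) is finally pushed in front of everything, with the
HA_REQUIRE_PRIMARY_KEY check moved up into the same pass. A simplified
standalone sketch of that ordering logic, using std::list in place of the
server's List<Key> (the type and names here are stand-ins, not the server's
declarations):

#include <iostream>
#include <list>
#include <stdexcept>
#include <string>

struct Key {
  enum Type { PRIMARY, UNIQUE, MULTIPLE, FULLTEXT } type;
  std::string name;
};

// Order keys the way mysql_create_table() now does: PRIMARY first, then
// the UNIQUE keys, then everything else. A second PRIMARY KEY is an error,
// mirroring the ER_MULTIPLE_PRI_KEY check above.
std::list<Key> order_keys(const std::list<Key> &keys) {
  std::list<Key> ordered;
  const Key *primary = nullptr;

  for (const Key &key : keys) {
    if (key.type == Key::PRIMARY) {
      if (primary)
        throw std::runtime_error("multiple PRIMARY KEYs");
      primary = &key;
    } else if (key.type == Key::UNIQUE) {
      ordered.push_front(key);      // UNIQUE keys gather at the front
    } else {
      ordered.push_back(key);       // other keys stay at the back
    }
  }
  if (primary)
    ordered.push_front(*primary);   // PRIMARY ends up first of all
  return ordered;
}

int main() {
  std::list<Key> keys = {{Key::MULTIPLE, "idx_a"},
                         {Key::UNIQUE, "uq_b"},
                         {Key::PRIMARY, "PRIMARY"},
                         {Key::UNIQUE, "uq_c"}};
  for (const Key &k : order_keys(keys))
    std::cout << k.name << '\n';    // PRIMARY, uq_c, uq_b, idx_a
}

Note that collecting UNIQUE keys with push_front reverses their relative
order (uq_c comes out before uq_b), just as the server code does; only the
PRIMARY-first, UNIQUE-next, rest-last grouping matters for the optimizer
and the duplicate-key checks described in the manual change above.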
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index c52370c02fd..fe91450d309 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -75,8 +75,16 @@ int mysql_update(THD *thd,TABLE_LIST *table_list,List<Item> &fields,
if (table->timestamp_field && // Don't set timestamp if used
table->timestamp_field->query_id == thd->query_id)
table->time_stamp=0;
+
+ /* Reset the query_id string so that ->used_keys is based on the WHERE */
+
table->used_keys=table->keys_in_use;
table->quick_keys=0;
+ reg2 Item *item;
+ List_iterator<Item> it(fields);
+ ulong query_id=thd->query_id-1;
+ while ((item=it++))
+ ((Item_field*) item)->field->query_id=query_id;
if (setup_fields(thd,table_list,values,0,0) ||
setup_conds(thd,table_list,&conds))
{
@@ -84,7 +92,8 @@ int mysql_update(THD *thd,TABLE_LIST *table_list,List<Item> &fields,
DBUG_RETURN(-1); /* purecov: inspected */
}
old_used_keys=table->used_keys;
- table->used_keys=0; // Can't use 'only index'
+ // Don't count on usage of 'only index' when calculating which key to use
+ table->used_keys=0;
select=make_select(table,0,0,conds,&error);
if (error ||
(select && select->check_quick(test(thd->options & SQL_SAFE_UPDATES),
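
The fix above works by stamping every SET column with a stale query id
(thd->query_id - 1) before setup_conds() runs, so that afterwards only
columns the WHERE clause actually read carry the current query_id, and
->used_keys is computed from the WHERE alone. A toy sketch of that marking
trick with simplified stand-in structures (Field here is illustrative, not
the server's class):

#include <cstdio>
#include <vector>

struct Field {
  const char *name;
  unsigned long query_id;        // last query that referenced this field
};

int main() {
  const unsigned long query_id = 42;       // current statement's id
  std::vector<Field> set_fields = {{"a", 0}, {"b", 0}};

  // Pre-mark the SET columns with a stale id. Anything the WHERE clause
  // touches gets re-stamped with the current id, so afterwards
  // query_id == 42 identifies exactly the columns the WHERE read.
  for (Field &f : set_fields)
    f.query_id = query_id - 1;

  // ... condition setup would stamp WHERE columns here ...
  set_fields[0].query_id = query_id;       // pretend WHERE referenced `a`

  for (const Field &f : set_fields)
    std::printf("%s read by WHERE: %s\n", f.name,
                f.query_id == query_id ? "yes" : "no");
}

Without the pre-marking, columns mentioned only in the SET list were also
counted as used; per the changelog entry above, this could make MySQL try
to update a record that did not match the whole WHERE part when all parts
of a multi-part key appeared in both clauses.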