author     unknown <joreland@mysql.com>   2004-12-06 14:52:31 +0100
committer  unknown <joreland@mysql.com>   2004-12-06 14:52:31 +0100
commit     bf0b3493d695ec32814017be1f2da059b65982c0 (patch)
tree       3071885324d9cba2e6369bdea65d4d7f3c56ab03 /sql
parent     eb05d78dcb8074a5dfceedf8c86681531fc3bfbe (diff)
parent     b1f4a482f4545999d6210aabdc2c0a80ee574374 (diff)
download   mariadb-git-bf0b3493d695ec32814017be1f2da059b65982c0.tar.gz
Merge mysql.com:/home/jonas/src/mysql-4.1-fix
into mysql.com:/home/jonas/src/wl1744

BitKeeper/etc/logging_ok: auto-union
configure.in: Auto merged
ndb/include/Makefile.am: Auto merged
ndb/src/common/mgmcommon/ConfigRetriever.cpp: Auto merged
ndb/src/common/util/version.c: Auto merged
ndb/src/kernel/blocks/dbdict/Dbdict.cpp: Auto merged
ndb/src/kernel/blocks/dbdict/Dbdict.hpp: Auto merged
ndb/src/kernel/blocks/dbdih/Dbdih.hpp: Auto merged
ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: Auto merged
ndb/src/kernel/blocks/dbtc/DbtcMain.cpp: Auto merged
ndb/src/mgmsrv/main.cpp: Auto merged
ndb/src/ndbapi/NdbConnection.cpp: Auto merged
sql/ha_ndbcluster.cc: Auto merged
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_heap.cc        |  55
-rw-r--r--  sql/ha_heap.h         |  10
-rw-r--r--  sql/ha_innodb.cc      |  20
-rw-r--r--  sql/ha_ndbcluster.cc  | 119
-rw-r--r--  sql/ha_ndbcluster.h   |   4
-rw-r--r--  sql/item_func.cc      |   2
-rw-r--r--  sql/slave.cc          |   4
-rw-r--r--  sql/slave.h           |   2
-rw-r--r--  sql/sql_insert.cc     |   7
-rw-r--r--  sql/sql_parse.cc      |   8
-rw-r--r--  sql/sql_yacc.yy       |  12
-rw-r--r--  sql/structs.h         |   7
12 files changed, 190 insertions(+), 60 deletions(-)
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 19b15c6fbcc..dbcf7bc9197 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -30,6 +30,18 @@
const char **ha_heap::bas_ext() const
{ static const char *ext[1]= { NullS }; return ext; }
+/*
+ Hash index statistics are updated (copied from HP_KEYDEF::hash_buckets to
+ rec_per_key) after a 1/HEAP_STATS_UPDATE_THRESHOLD fraction of the table's
+ records has been inserted/updated/deleted. delete_all_rows() and a table
+ flush cause an immediate update.
+
+ NOTE
+ Hash index statistics must be updated when the number of table records
+ changes from 0 to non-zero and vice versa. Otherwise records_in_range()
+ may erroneously return 0 and 'range' may miss records.
+*/
+#define HEAP_STATS_UPDATE_THRESHOLD 10
int ha_heap::open(const char *name, int mode, uint test_if_locked)
{
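
The threshold above trades accuracy for overhead: key statistics are refreshed
once the number of changed rows exceeds roughly 1/HEAP_STATS_UPDATE_THRESHOLD
of the current table size, rather than on every write. A standalone sketch of
that cadence, with illustrative values that are not part of the patch:

    #include <cstdio>

    // Hypothetical illustration of the refresh cadence: statistics are
    // refreshed once changed_rows * THRESHOLD exceeds the row count.
    static const unsigned HEAP_STATS_UPDATE_THRESHOLD = 10;

    int main()
    {
      unsigned records = 1000;       // current rows in the heap table
      unsigned records_changed = 0;  // rows changed since last refresh
      unsigned refreshes = 0;

      for (unsigned i = 0; i < 1000; i++)
      {
        records++;                   // simulate an insert
        if (++records_changed * HEAP_STATS_UPDATE_THRESHOLD > records)
        {
          refreshes++;               // would call update_key_stats() here
          records_changed = 0;
        }
      }
      // Roughly one refresh per ~10% growth of the table.
      printf("refreshes: %u\n", refreshes);
      return 0;
    }
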
@@ -48,6 +60,8 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
{
/* Initialize variables for the opened table */
set_keys_for_scanning();
+ if (table->tmp_table == NO_TMP_TABLE)
+ update_key_stats();
}
return (file ? 0 : 1);
}
@@ -84,28 +98,58 @@ void ha_heap::set_keys_for_scanning(void)
}
}
+void ha_heap::update_key_stats()
+{
+ for (uint i= 0; i < table->keys; i++)
+ {
+ KEY *key=table->key_info+i;
+ if (key->algorithm != HA_KEY_ALG_BTREE)
+ {
+ ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
+ key->rec_per_key[key->key_parts-1]=
+ hash_buckets ? file->s->records/hash_buckets : 0;
+ }
+ }
+ records_changed= 0;
+}
+
int ha_heap::write_row(byte * buf)
{
+ int res;
statistic_increment(ha_write_count,&LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])
update_auto_increment();
- return heap_write(file,buf);
+ res= heap_write(file,buf);
+ if (!res && table->tmp_table == NO_TMP_TABLE &&
+ ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
+ update_key_stats();
+ return res;
}
int ha_heap::update_row(const byte * old_data, byte * new_data)
{
+ int res;
statistic_increment(ha_update_count,&LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
- return heap_update(file,old_data,new_data);
+ res= heap_update(file,old_data,new_data);
+ if (!res && table->tmp_table == NO_TMP_TABLE &&
+ ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
+ update_key_stats();
+ return res;
}
int ha_heap::delete_row(const byte * buf)
{
+ int res;
statistic_increment(ha_delete_count,&LOCK_status);
- return heap_delete(file,buf);
+ res= heap_delete(file,buf);
+ if (!res && table->tmp_table == NO_TMP_TABLE &&
+ ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
+ update_key_stats();
+ return res;
}
int ha_heap::index_read(byte * buf, const byte * key, uint key_len,
@@ -227,6 +271,8 @@ int ha_heap::extra(enum ha_extra_function operation)
int ha_heap::delete_all_rows()
{
heap_clear(file);
+ if (table->tmp_table == NO_TMP_TABLE)
+ update_key_stats();
return 0;
}
@@ -384,7 +430,8 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
min_key->flag != HA_READ_KEY_EXACT ||
max_key->flag != HA_READ_AFTER_KEY)
return HA_POS_ERROR; // Can only use exact keys
- return 10; // Good guess
+ else
+ return key->rec_per_key[key->key_parts-1];
}
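
With hash indexes, the exact-match estimate that records_in_range() now
returns is the average hash-chain length computed in update_key_stats()
above: total rows divided by the number of hash buckets. A hedged arithmetic
sketch with assumed numbers:

    #include <cstdio>

    // Sketch (made-up values): rec_per_key for a hash key is rows/buckets,
    // i.e. the average number of rows sharing one distinct key value.
    int main()
    {
      unsigned long records      = 50000;  // rows in the table
      unsigned long hash_buckets = 12500;  // buckets for this key
      unsigned long rec_per_key  = hash_buckets ? records / hash_buckets : 0;
      // records_in_range() would now return 4 instead of the old
      // hard-coded guess of 10.
      printf("estimated rows per exact key lookup: %lu\n", rec_per_key);
      return 0;
    }
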
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index 9ca6b9b76b6..f36e9f31c55 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -27,9 +27,10 @@ class ha_heap: public handler
{
HP_INFO *file;
key_map btree_keys;
-
- public:
- ha_heap(TABLE *table): handler(table), file(0) {}
+ /* number of records changed since last statistics update */
+ uint records_changed;
+public:
+ ha_heap(TABLE *table): handler(table), file(0), records_changed(0) {}
~ha_heap() {}
const char *table_type() const { return "HEAP"; }
const char *index_type(uint inx)
@@ -91,5 +92,6 @@ class ha_heap: public handler
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
-
+private:
+ void update_key_stats();
};
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 6e08fc680b2..07d8da63733 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -1505,17 +1505,14 @@ innobase_close_connection(
*****************************************************************************/
/********************************************************************
-This function is not relevant since we store the tables and indexes
-into our own tablespace, not as files, whose extension this function would
-give. */
+Gives the file extension of an InnoDB single-table tablespace. */
const char**
ha_innobase::bas_ext() const
/*========================*/
- /* out: file extension strings, currently not
- used */
+ /* out: file extension string */
{
- static const char* ext[] = {".InnoDB", NullS};
+ static const char* ext[] = {".ibd", NullS};
return(ext);
}
@@ -2390,8 +2387,9 @@ ha_innobase::write_row(
same SQL statement! */
if (auto_inc == 0 && user_thd->next_insert_id != 0) {
- auto_inc = user_thd->next_insert_id;
- auto_inc_counter_for_this_stat = auto_inc;
+
+ auto_inc_counter_for_this_stat
+ = user_thd->next_insert_id;
}
if (auto_inc == 0 && auto_inc_counter_for_this_stat) {
@@ -2399,14 +2397,14 @@ ha_innobase::write_row(
this SQL statement with SET INSERT_ID. We must
assign sequential values from the counter. */
- auto_inc_counter_for_this_stat++;
- incremented_auto_inc_for_stat = TRUE;
-
auto_inc = auto_inc_counter_for_this_stat;
/* We give MySQL a new value to place in the
auto-inc column */
user_thd->next_insert_id = auto_inc;
+
+ auto_inc_counter_for_this_stat++;
+ incremented_auto_inc_for_stat = TRUE;
}
if (auto_inc != 0) {
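
The reordered branch hands out the current counter value first and advances
the counter afterwards, so rows forced with SET INSERT_ID receive consecutive
values starting at the forced one. A much-simplified standalone illustration,
not the actual handler logic:

    #include <cstdio>

    // Simplified illustration: after SET INSERT_ID=100, values are handed
    // out before the counter is advanced, yielding 100, 101, 102.
    int main()
    {
      unsigned long counter = 100;         // seeded from next_insert_id
      for (int row = 0; row < 3; row++)
      {
        unsigned long auto_inc = counter;  // use the current value...
        counter++;                         // ...then advance the counter
        printf("row %d gets auto_inc %lu\n", row, auto_inc);
      }
      return 0;
    }
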
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 331988f87c8..fe1a41f4ae2 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -109,7 +109,7 @@ static const err_code_mapping err_map[]=
{
{ 626, HA_ERR_KEY_NOT_FOUND },
{ 630, HA_ERR_FOUND_DUPP_KEY },
- { 893, HA_ERR_FOUND_DUPP_UNIQUE },
+ { 893, HA_ERR_FOUND_DUPP_KEY }, // Unique constraint
{ 721, HA_ERR_TABLE_EXIST },
{ 4244, HA_ERR_TABLE_EXIST },
@@ -1018,7 +1018,8 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
- m_retrieve_all_fields)
+ m_retrieve_all_fields ||
+ (field->flags & PRI_KEY_FLAG) && m_retrieve_primary_key)
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(trans->getNdbError());
@@ -1029,7 +1030,6 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
m_value[i].ptr= NULL;
}
}
-
if (execute_no_commit_ie(this,trans) != 0)
{
table->status= STATUS_NOT_FOUND;
@@ -1093,6 +1093,34 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
DBUG_RETURN(0);
}
+/*
+ Peek to check if a particular row already exists
+*/
+
+int ha_ndbcluster::peek_row()
+{
+ NdbConnection *trans= m_active_trans;
+ NdbOperation *op;
+ THD *thd= current_thd;
+ DBUG_ENTER("peek_row");
+
+ NdbOperation::LockMode lm=
+ (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
+ if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) ||
+ op->readTuple(lm) != 0)
+ ERR_RETURN(trans->getNdbError());
+
+ int res;
+ if ((res= set_primary_key(op)))
+ ERR_RETURN(trans->getNdbError());
+
+ if (execute_no_commit_ie(this,trans) != 0)
+ {
+ table->status= STATUS_NOT_FOUND;
+ DBUG_RETURN(ndb_err(trans));
+ }
+ DBUG_RETURN(0);
+}
/*
Read one record from NDB using unique secondary index
@@ -1128,9 +1156,10 @@ int ha_ndbcluster::unique_index_read(const byte *key,
for (i= 0; key_part != end; key_part++, i++)
{
- if (set_ndb_key(op, key_part->field, i, key_ptr))
+ if (set_ndb_key(op, key_part->field, i,
+ key_part->null_bit ? key_ptr + 1 : key_ptr))
ERR_RETURN(trans->getNdbError());
- key_ptr+= key_part->length;
+ key_ptr+= key_part->store_length;
}
// Get non-index attribute(s)
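
The fix above relies on MySQL's key-buffer layout for nullable key parts: a
one-byte NULL indicator precedes the packed value, so the value itself starts
at key_ptr + 1 and the whole part occupies store_length (one byte more than
length). A self-contained sketch of that layout, with illustrative names:

    #include <cstdio>
    #include <cstring>

    // Sketch: decode one nullable INT key part from a search-key buffer.
    int main()
    {
      unsigned char key[5];
      int value = 42;
      key[0] = 0;                          // 0 = value is not NULL
      memcpy(key + 1, &value, sizeof(value));

      const unsigned char *key_ptr = key;
      bool null_bit = true;                // this key part is nullable
      const unsigned char *value_ptr = null_bit ? key_ptr + 1 : key_ptr;

      int decoded;
      memcpy(&decoded, value_ptr, sizeof(decoded));
      printf("decoded key value: %d\n", decoded);   // 42
      // Advancing by store_length (5), not length (4), keeps any
      // following key parts aligned.
      return 0;
    }
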
@@ -1138,7 +1167,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
- (field->flags & PRI_KEY_FLAG))
+ (field->flags & PRI_KEY_FLAG)) // && m_retrieve_primary_key ??
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
@@ -1566,7 +1595,7 @@ int ha_ndbcluster::filtered_scan(const byte *key, uint key_len,
Field* field= key_part->field;
uint ndb_fieldnr= key_part->fieldnr-1;
DBUG_PRINT("key_part", ("fieldnr: %d", ndb_fieldnr));
- // const NDBCOL *col= tab->getColumn(ndb_fieldnr);
+ //const NDBCOL *col= ((const NDBTAB *) m_table)->getColumn(ndb_fieldnr);
uint32 field_len= field->pack_length();
DBUG_DUMP("key", (char*)key, field_len);
@@ -1635,9 +1664,17 @@ int ha_ndbcluster::write_row(byte *record)
int res;
DBUG_ENTER("write_row");
- if(m_ignore_dup_key_not_supported)
+ if(m_ignore_dup_key && table->primary_key != MAX_KEY)
{
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ int peek_res= peek_row();
+
+ if (!peek_res)
+ {
+ m_dupkey= table->primary_key;
+ DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY);
+ }
+ if (peek_res != HA_ERR_KEY_NOT_FOUND)
+ DBUG_RETURN(peek_res);
}
statistic_increment(ha_write_count,&LOCK_status);
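
The rewritten branch turns NDB's former "wrong command" failure for
INSERT IGNORE / REPLACE into the ordinary duplicate-key protocol: peek for
the primary key first and report HA_ERR_FOUND_DUPP_KEY if the row already
exists, so the SQL layer can ignore the row or update it. A stubbed sketch of
the control flow (types and values simplified; the constants mirror MySQL's
handler error codes):

    #include <cstdio>

    static const int HA_ERR_KEY_NOT_FOUND  = 120;
    static const int HA_ERR_FOUND_DUPP_KEY = 121;

    // Stand-in for peek_row(): 0 = row found, otherwise an error code.
    static int peek_row(bool row_exists)
    {
      return row_exists ? 0 : HA_ERR_KEY_NOT_FOUND;
    }

    static int write_row(bool ignore_dup_key, bool row_exists)
    {
      if (ignore_dup_key)
      {
        int peek_res = peek_row(row_exists);
        if (!peek_res)
          return HA_ERR_FOUND_DUPP_KEY;  // SQL layer handles IGNORE/REPLACE
        if (peek_res != HA_ERR_KEY_NOT_FOUND)
          return peek_res;               // real error, propagate
      }
      return 0;                          // proceed with the actual insert
    }

    int main()
    {
      printf("dup row: %d\n", write_row(true, true));   // 121
      printf("new row: %d\n", write_row(true, false));  // 0
      return 0;
    }
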
@@ -1791,7 +1828,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
NdbOperation *op;
uint i;
DBUG_ENTER("update_row");
-
+
statistic_increment(ha_update_count,&LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
@@ -2144,22 +2181,19 @@ void ha_ndbcluster::print_results()
}
case NdbDictionary::Column::Decimal: {
char *value= field->ptr;
-
fprintf(DBUG_FILE, "Decimal\t'%-*s'", field->pack_length(), value);
break;
}
case NdbDictionary::Column::Char:{
- char buf[field->pack_length()+1];
- char *value= (char *) field->ptr;
- snprintf(buf, field->pack_length(), "%s", value);
- fprintf(DBUG_FILE, "Char\t'%s'", buf);
+ const char *value= (char *) field->ptr;
+ fprintf(DBUG_FILE, "Char\t'%.*s'", field->pack_length(), value);
break;
}
case NdbDictionary::Column::Varchar:
case NdbDictionary::Column::Binary:
case NdbDictionary::Column::Varbinary: {
- char *value= (char *) field->ptr;
- fprintf(DBUG_FILE, "'%s'", value);
+ const char *value= (char *) field->ptr;
+ fprintf(DBUG_FILE, "Var\t'%.*s'", field->pack_length(), value);
break;
}
case NdbDictionary::Column::Datetime: {
@@ -2210,6 +2244,28 @@ int ha_ndbcluster::index_end()
DBUG_RETURN(close_scan());
}
+/**
+ * Check if key contains null
+ */
+static
+int
+check_null_in_key(const KEY* key_info, const byte *key, uint key_len)
+{
+ KEY_PART_INFO *curr_part, *end_part;
+ const byte* end_ptr = key + key_len;
+ curr_part= key_info->key_part;
+ end_part= curr_part + key_info->key_parts;
+

+ for (; curr_part != end_part && key < end_ptr; curr_part++)
+ {
+ if(curr_part->null_bit && *key)
+ return 1;
+
+ key += curr_part->store_length;
+ }
+ return 0;
+}
int ha_ndbcluster::index_read(byte *buf,
const byte *key, uint key_len,
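
index_read() and read_range_first_to_buf() below call this helper to route
lookups: an exact search key carrying a SQL NULL cannot be answered by a
single-row unique read, so those paths fall back to an ordered index scan. A
simplified stand-in using plain structs, assuming the same null-byte layout
as the earlier sketch:

    #include <cstdio>

    // Hedged stand-in for check_null_in_key(): the real code walks
    // KEY_PART_INFO entries; the logic is the same.
    struct Part { bool null_bit; unsigned store_length; };

    static int key_has_null(const Part *parts, unsigned nparts,
                            const unsigned char *key, unsigned key_len)
    {
      const unsigned char *end = key + key_len;
      for (unsigned i = 0; i < nparts && key < end; i++)
      {
        if (parts[i].null_bit && *key)
          return 1;                      // indicator byte set => SQL NULL
        key += parts[i].store_length;
      }
      return 0;
    }

    int main()
    {
      Part parts[2] = { { true, 5 }, { true, 5 } };
      unsigned char key[10] = { 0,1,2,3,4,  1,0,0,0,0 }; // 2nd part NULL
      printf("has null: %d\n", key_has_null(parts, 2, key, 10)); // 1
      return 0;
    }
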
@@ -2227,6 +2283,8 @@ int ha_ndbcluster::index_read(byte *buf,
case PRIMARY_KEY_INDEX:
if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len)
{
+ if(m_active_cursor && (error= close_scan()))
+ DBUG_RETURN(error);
DBUG_RETURN(pk_read(key, key_len, buf));
}
else if (type == PRIMARY_KEY_INDEX)
@@ -2236,8 +2294,11 @@ int ha_ndbcluster::index_read(byte *buf,
break;
case UNIQUE_ORDERED_INDEX:
case UNIQUE_INDEX:
- if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len)
+ if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len &&
+ !check_null_in_key(key_info, key, key_len))
{
+ if(m_active_cursor && (error= close_scan()))
+ DBUG_RETURN(error);
DBUG_RETURN(unique_index_read(key, key_len, buf));
}
else if (type == UNIQUE_INDEX)
@@ -2341,6 +2402,8 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
start_key->length == key_info->key_length &&
start_key->flag == HA_READ_KEY_EXACT)
{
+ if(m_active_cursor && (error= close_scan()))
+ DBUG_RETURN(error);
error= pk_read(start_key->key, start_key->length, buf);
DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
}
@@ -2348,10 +2411,12 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
case UNIQUE_ORDERED_INDEX:
case UNIQUE_INDEX:
key_info= table->key_info + active_index;
- if (start_key &&
- start_key->length == key_info->key_length &&
- start_key->flag == HA_READ_KEY_EXACT)
+ if (start_key && start_key->length == key_info->key_length &&
+ start_key->flag == HA_READ_KEY_EXACT &&
+ !check_null_in_key(key_info, start_key->key, start_key->length))
{
+ if(m_active_cursor && (error= close_scan()))
+ DBUG_RETURN(error);
error= unique_index_read(start_key->key, start_key->length, buf);
DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error);
}
@@ -2653,15 +2718,15 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
m_use_write= TRUE;
} else
{
- if (table->keys)
- m_ignore_dup_key_not_supported= TRUE;
+ DBUG_PRINT("info", ("Ignoring duplicate key"));
+ m_ignore_dup_key= TRUE;
}
break;
case HA_EXTRA_NO_IGNORE_DUP_KEY:
DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
m_use_write= FALSE;
- m_ignore_dup_key_not_supported= FALSE;
+ m_ignore_dup_key= FALSE;
break;
case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those
where field->query_id is the same as
@@ -2680,6 +2745,7 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_RETRIEVE_PRIMARY_KEY:
DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_PRIMARY_KEY"));
+ m_retrieve_primary_key= TRUE;
break;
case HA_EXTRA_CHANGE_KEY_TO_UNIQUE:
DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_UNIQUE"));
@@ -2942,6 +3008,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_ASSERT(m_active_trans);
// Start of transaction
m_retrieve_all_fields= FALSE;
+ m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
{
NDBDICT *dict= m_ndb->getDictionary();
@@ -3034,6 +3101,7 @@ int ha_ndbcluster::start_stmt(THD *thd)
// Start of statement
m_retrieve_all_fields= FALSE;
+ m_retrieve_primary_key= FALSE;
m_ops_pending= 0;
DBUG_RETURN(error);
@@ -3640,9 +3708,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
HA_NO_PREFIX_CHAR_KEYS),
m_share(0),
m_use_write(FALSE),
- m_ignore_dup_key_not_supported(FALSE),
+ m_ignore_dup_key(FALSE),
m_primary_key_update(FALSE),
m_retrieve_all_fields(FALSE),
+ m_retrieve_primary_key(FALSE),
m_rows_to_insert(1),
m_rows_inserted(0),
m_bulk_insert_rows(1024),
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 2121228a869..c6c233c013c 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -183,6 +183,7 @@ class ha_ndbcluster: public handler
int pk_read(const byte *key, uint key_len, byte *buf);
int complemented_pk_read(const byte *old_data, byte *new_data);
+ int peek_row();
int unique_index_read(const byte *key, uint key_len,
byte *buf);
int ordered_index_scan(const key_range *start_key,
@@ -242,9 +243,10 @@ class ha_ndbcluster: public handler
typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
bool m_use_write;
- bool m_ignore_dup_key_not_supported;
+ bool m_ignore_dup_key;
bool m_primary_key_update;
bool m_retrieve_all_fields;
+ bool m_retrieve_primary_key;
ha_rows m_rows_to_insert;
ha_rows m_rows_inserted;
ha_rows m_bulk_insert_rows;
diff --git a/sql/item_func.cc b/sql/item_func.cc
index bf85e5b378a..c07e9f23ea2 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -264,7 +264,7 @@ Item_func::Item_func(THD *thd, Item_func *item)
Sets as a side effect the following class variables:
maybe_null Set if any argument may return NULL
with_sum_func Set if any of the arguments contains a sum function
- used_table_cache Set to union of the arguments used table
+ used_tables_cache Set to union of the tables used by arguments
str_value.charset If this is a string function, set this to the
character set for the first argument.
diff --git a/sql/slave.cc b/sql/slave.cc
index b6ad35573ff..6bc977e8d41 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1032,7 +1032,7 @@ bool net_request_file(NET* net, const char* fname)
}
-const char *rewrite_db(const char* db, uint *new_len)
+const char *rewrite_db(const char* db, uint32 *new_len)
{
if (replicate_rewrite_db.is_empty() || !db)
return db;
@@ -1043,7 +1043,7 @@ const char *rewrite_db(const char* db, uint *new_len)
{
if (!strcmp(tmp->key, db))
{
- *new_len= strlen(tmp->val);
+ *new_len= (uint32)strlen(tmp->val);
return tmp->val;
}
}
diff --git a/sql/slave.h b/sql/slave.h
index 97f197fb50a..c6cb81699a2 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -510,7 +510,7 @@ int add_table_rule(HASH* h, const char* table_spec);
int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec);
void init_table_rule_hash(HASH* h, bool* h_inited);
void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited);
-const char *rewrite_db(const char* db, uint *new_db_len);
+const char *rewrite_db(const char* db, uint32 *new_db_len);
const char *print_slave_db_safe(const char *db);
int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code);
void skip_load_data_infile(NET* net);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 822ce622b1b..d590d3b5093 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -453,8 +453,8 @@ int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
setup_tables(insert_table_list) ||
setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0) ||
(duplic == DUP_UPDATE &&
- (setup_fields(thd, 0, insert_table_list, update_fields, 0, 0, 0) ||
- setup_fields(thd, 0, insert_table_list, update_values, 0, 0, 0))))
+ (setup_fields(thd, 0, insert_table_list, update_fields, 1, 0, 0) ||
+ setup_fields(thd, 0, insert_table_list, update_values, 1, 0, 0))))
DBUG_RETURN(-1);
if (find_real_table_in_list(table_list->next,
table_list->db, table_list->real_name))
@@ -462,6 +462,9 @@ int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name);
DBUG_RETURN(-1);
}
+ if (duplic == DUP_UPDATE || duplic == DUP_REPLACE)
+ table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
+
DBUG_RETURN(0);
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 7420f9de100..e066e447345 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -4123,7 +4123,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length)
- SET uses tot_length.
*/
void calculate_interval_lengths(THD *thd, TYPELIB *interval,
- uint *max_length, uint *tot_length)
+ uint32 *max_length, uint32 *tot_length)
{
const char **pos;
uint *len;
@@ -4135,7 +4135,7 @@ void calculate_interval_lengths(THD *thd, TYPELIB *interval,
*len= (uint) strip_sp((char*) *pos);
uint length= cs->cset->numchars(cs, *pos, *pos + *len);
*tot_length+= length;
- set_if_bigger(*max_length, length);
+ set_if_bigger(*max_length, (uint32)length);
}
}
@@ -4454,7 +4454,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
if (new_field->pack_length > 4)
new_field->pack_length=8;
new_field->interval=interval;
- uint dummy_max_length;
+ uint32 dummy_max_length;
calculate_interval_lengths(thd, interval,
&dummy_max_length, &new_field->length);
new_field->length+= (interval->count - 1);
@@ -4484,7 +4484,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
new_field->interval=interval;
new_field->pack_length=interval->count < 256 ? 1 : 2; // Should be safe
- uint dummy_tot_length;
+ uint32 dummy_tot_length;
calculate_interval_lengths(thd, interval,
&new_field->length, &dummy_tot_length);
set_if_smaller(new_field->length,MAX_FIELD_WIDTH-1);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index d842b4b66bb..8cbfaf3f99b 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1402,6 +1402,9 @@ type:
| BINARY '(' NUM ')' { Lex->length=$3.str;
Lex->charset=&my_charset_bin;
$$=FIELD_TYPE_STRING; }
+ | BINARY { Lex->length= (char*) "1";
+ Lex->charset=&my_charset_bin;
+ $$=FIELD_TYPE_STRING; }
| varchar '(' NUM ')' opt_binary { Lex->length=$3.str;
$$=FIELD_TYPE_VAR_STRING; }
| nvarchar '(' NUM ')' { Lex->length=$3.str;
@@ -3929,11 +3932,12 @@ do: DO_SYM
{
LEX *lex=Lex;
lex->sql_command = SQLCOM_DO;
- if (!(lex->insert_list = new List_item))
- YYABORT;
+ mysql_init_select(lex);
+ }
+ expr_list
+ {
+ Lex->insert_list= $3;
}
- values
- {}
;
/*
diff --git a/sql/structs.h b/sql/structs.h
index c30d85f59cb..846b3400fab 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -89,7 +89,12 @@ typedef struct st_key {
enum ha_key_alg algorithm;
KEY_PART_INFO *key_part;
char *name; /* Name of key */
- ulong *rec_per_key; /* Key part distribution */
+ /*
+ Array of AVG(#records with the same field value) for 1st ... Nth key part.
+ 0 means 'not known'.
+ For temporary heap tables this member is NULL.
+ */
+ ulong *rec_per_key;
union {
int bdb_return_if_eq;
} handler;
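
For a composite index, rec_per_key holds one entry per leading key prefix:
entry N is the average number of rows sharing values for the first N+1 key
parts. A small numeric illustration with made-up figures:

    #include <cstdio>

    // Illustration: index (a, b) on a 10000-row table with 100 distinct
    // values of a and 2500 distinct (a, b) combinations.
    int main()
    {
      unsigned long rows = 10000;
      unsigned long rec_per_key[2];
      rec_per_key[0] = rows / 100;   // ~100 rows share each value of a
      rec_per_key[1] = rows / 2500;  // ~4 rows share each (a, b) pair
      printf("avg rows per a: %lu, per (a,b): %lu\n",
             rec_per_key[0], rec_per_key[1]);
      return 0;
    }
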