-rw-r--r--   Docs/manual.texi              |  8
-rw-r--r--   mysql-test/r/null_key.result  |  2
-rw-r--r--   sql/sql_insert.cc             | 15
-rw-r--r--   sql/sql_select.cc             | 84
4 files changed, 65 insertions, 44 deletions
diff --git a/Docs/manual.texi b/Docs/manual.texi
index 9eff296b7e6..e67e1dc0468 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -46785,7 +46785,7 @@ users use this code as the rest of the code and because of this we are not
 yet 100% confident in this code.
 
 @menu
-* News-3.23.46::
+* News-3.23.46:: Changes in release 3.23.46
 * News-3.23.45:: Changes in release 3.23.45
 * News-3.23.44:: Changes in release 3.23.44
 * News-3.23.43:: Changes in release 3.23.43
@@ -46846,6 +46846,12 @@ One can now kill @code{ANALYZE},@code{REPAIR} and @code{OPTIMIZE TABLE}
 when the thread is waiting to get a lock on the table.
 @item
 Fixed race condition in @code{ANALYZE TABLE}.
+@item
+Fixed bug when joining with caching (unlikely to happen).
+@item
+Fixed race condition when using the binary log and @code{INSERT DELAYED}
+which could cause the binary log to have rows that was not yet written
+to MyISAM tables.
 @end itemize
 
 @node News-3.23.45, News-3.23.44, News-3.23.46, News-3.23.x
diff --git a/mysql-test/r/null_key.result b/mysql-test/r/null_key.result
index d0d59cdebf8..ead1dc29326 100644
--- a/mysql-test/r/null_key.result
+++ b/mysql-test/r/null_key.result
@@ -11,7 +11,7 @@ t1 index NULL a 9 NULL 12 where used; Using index
 table type possible_keys key key_len ref rows Extra
 t1 range a,b a 9 NULL 3 where used; Using index
 table type possible_keys key key_len ref rows Extra
-t1 range a,b a 9 NULL 2 where used; Using index
+t1 ref a,b b 4 const 2 where used
 table type possible_keys key key_len ref rows Extra
 t1 ref a,b a 5 const 3 where used; Using index
 table type possible_keys key key_len ref rows Extra
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index f7ff3ed159c..d8861390d87 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1099,7 +1099,7 @@ bool delayed_insert::handle_inserts(void)
 {
   int error;
   uint max_rows;
-  bool using_ignore=0;
+  bool using_ignore=0, using_bin_log=mysql_bin_log.is_open();
   delayed_row *row;
   DBUG_ENTER("handle_inserts");
 
@@ -1124,7 +1124,13 @@ bool delayed_insert::handle_inserts(void)
     max_rows= ~0;                               // Do as much as possible
   }
 
-  table->file->extra(HA_EXTRA_WRITE_CACHE);
+  /*
+    We can't use row caching when using the binary log because if
+    we get a crash, then binary log will contain rows that are not yet
+    written to disk, which will cause problems in replication.
+  */
+  if (!using_bin_log)
+    table->file->extra(HA_EXTRA_WRITE_CACHE);
   pthread_mutex_lock(&mutex);
   while ((row=rows.get()))
   {
@@ -1161,7 +1167,7 @@ bool delayed_insert::handle_inserts(void)
     if (row->query && row->log_query)
     {
       mysql_update_log.write(&thd,row->query, row->query_length);
-      if (mysql_bin_log.is_open())
+      if (using_bin_log)
       {
         thd.query_length = row->query_length;
         Query_log_event qinfo(&thd, row->query);
@@ -1197,7 +1203,8 @@ bool delayed_insert::handle_inserts(void)
         /* This should never happen */
         sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->real_name);
       }
-      table->file->extra(HA_EXTRA_WRITE_CACHE);
+      if (!using_bin_log)
+        table->file->extra(HA_EXTRA_WRITE_CACHE);
       pthread_mutex_lock(&mutex);
       thd.proc_info="insert";
     }
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 777a10503f6..774be3679a2 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1881,52 +1881,55 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
 ** Find how much space the prevous read not const tables takes in cache
 */
 
+static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
+{
+  uint null_fields,blobs,fields,rec_length;
+  null_fields=blobs=fields=rec_length=0;
+
+  Field **f_ptr,*field;
+  for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
+  {
+    if (field->query_id == thd->query_id)
+    {
+      uint flags=field->flags;
+      fields++;
+      rec_length+=field->pack_length();
+      if (flags & BLOB_FLAG)
+        blobs++;
+      if (!(flags & NOT_NULL_FLAG))
+        null_fields++;
+    }
+  }
+  if (null_fields)
+    rec_length+=(join_tab->table->null_fields+7)/8;
+  if (join_tab->table->maybe_null)
+    rec_length+=sizeof(my_bool);
+  if (blobs)
+  {
+    uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
+                             (join_tab->table->reclength- rec_length));
+    rec_length+=(uint) max(4,blob_length);
+  }
+  join_tab->used_fields=fields;
+  join_tab->used_fieldlength=rec_length;
+  join_tab->used_blobs=blobs;
+}
+
+
 static uint cache_record_length(JOIN *join,uint idx)
 {
-  uint length;
+  uint length=0;
   JOIN_TAB **pos,**end;
   THD *thd=join->thd;
 
-  length=0;
   for (pos=join->best_ref+join->const_tables,end=join->best_ref+idx ;
       pos != end ;
       pos++)
   {
     JOIN_TAB *join_tab= *pos;
-    if (!join_tab->used_fieldlength)
-    {                                           /* Not calced yet */
-      uint null_fields,blobs,fields,rec_length;
-      null_fields=blobs=fields=rec_length=0;
-
-      Field **f_ptr,*field;
-      for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
-      {
-        if (field->query_id == thd->query_id)
-        {
-          uint flags=field->flags;
-          fields++;
-          rec_length+=field->pack_length();
-          if (flags & BLOB_FLAG)
-            blobs++;
-          if (!(flags & NOT_NULL_FLAG))
-            null_fields++;
-        }
-      }
-      if (null_fields)
-        rec_length+=(join_tab->table->null_fields+7)/8;
-      if (join_tab->table->maybe_null)
-        rec_length+=sizeof(my_bool);
-      if (blobs)
-      {
-        uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
-                                 (join_tab->table->reclength- rec_length));
-        rec_length+=(uint) max(4,blob_length);
-      }
-      join_tab->used_fields=fields;
-      join_tab->used_fieldlength=rec_length;
-      join_tab->used_blobs=blobs;
-    }
+    if (!join_tab->used_fieldlength)            /* Not calced yet */
+      calc_used_field_length(thd, join_tab);
     length+=join_tab->used_fieldlength;
   }
   return length;
@@ -2248,6 +2251,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
       used_tables|=current_map;
 
       if (tab->type == JT_REF && tab->quick &&
+          tab->ref.key == tab->quick->index &&
          tab->ref.key_length < tab->quick->max_used_key_length)
       {
        /* Range uses longer key;  Use this instead of ref on key */
@@ -5631,15 +5635,19 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
   uint length,blobs,size;
   CACHE_FIELD *copy,**blob_ptr;
   JOIN_CACHE *cache;
+  JOIN_TAB *join_tab;
   DBUG_ENTER("join_init_cache");
 
   cache= &tables[table_count].cache;
   cache->fields=blobs=0;
 
-  for (i=0 ; i < table_count ; i++)
+  join_tab=tables;
+  for (i=0 ; i < table_count ; i++,join_tab++)
   {
-    cache->fields+=tables[i].used_fields;
-    blobs+=tables[i].used_blobs;
+    if (!join_tab->used_fieldlength)            /* Not calced yet */
+      calc_used_field_length(thd, join_tab);
+    cache->fields+=join_tab->used_fields;
+    blobs+=join_tab->used_blobs;
   }
   if (!(cache->field=(CACHE_FIELD*)
        sql_alloc(sizeof(CACHE_FIELD)*(cache->fields+table_count*2)+(blobs+1)*
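For context on the sql_select.cc part of this patch: the per-table row-length estimate is factored out of cache_record_length() into calc_used_field_length() so that join_init_cache() can compute it on demand when the optimizer has not filled in used_fieldlength yet, which appears to be the "Fixed bug when joining with caching" changelog item. The sketch below is a simplified, self-contained illustration of that estimate (pack lengths of the columns the query touches, a null bitmap rounded up to whole bytes, an extra flag when the table can be NULL-complemented, and an average blob size from engine statistics). FieldInfo, TableInfo, and estimate_cached_row_length() are hypothetical stand-ins, not MySQL's Field/TABLE/JOIN_TAB structures.

```cpp
// Standalone sketch of the join-cache row-length estimate; types are made up
// for illustration and do not match the MySQL source.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

struct FieldInfo {
  uint32_t pack_length;   // bytes needed to store the column in a row buffer
  bool used_by_query;     // column is referenced by the current query
  bool is_blob;
  bool is_nullable;
};

struct TableInfo {
  std::vector<FieldInfo> fields;
  uint32_t null_field_count;   // total nullable columns in the table
  bool maybe_null;             // table may be NULL-complemented (outer join)
  uint32_t mean_rec_length;    // average record length reported by the engine
  uint32_t reclength;          // fixed in-memory record length
};

static uint32_t estimate_cached_row_length(const TableInfo &t) {
  uint32_t fields = 0, blobs = 0, null_fields = 0, rec_length = 0;
  for (const FieldInfo &f : t.fields) {
    if (!f.used_by_query)
      continue;                       // unused columns are not cached
    fields++;
    rec_length += f.pack_length;
    if (f.is_blob)
      blobs++;
    if (f.is_nullable)
      null_fields++;
  }
  if (null_fields)                    // null bitmap: one bit per nullable column
    rec_length += (t.null_field_count + 7) / 8;
  if (t.maybe_null)                   // extra flag for NULL-complemented rows
    rec_length += sizeof(bool);
  if (blobs) {                        // estimate blob payload from engine stats,
                                      // clamped to avoid unsigned underflow
    uint32_t fixed_part = t.reclength - rec_length;
    uint32_t blob_length =
        t.mean_rec_length > fixed_part ? t.mean_rec_length - fixed_part : 0;
    rec_length += std::max<uint32_t>(4, blob_length);
  }
  return rec_length;
}

int main() {
  TableInfo t;
  t.fields = {{4, true, false, false},    // integer column used by the query
              {32, true, false, true},    // nullable string-ish column
              {8, false, false, false}};  // column not referenced: skipped
  t.null_field_count = 1;
  t.maybe_null = false;
  t.mean_rec_length = 60;
  t.reclength = 44;
  std::printf("estimated cached row length: %u bytes\n",
              estimate_cached_row_length(t));
  return 0;
}
```

The design point of the refactoring is simply reuse: before the patch only cache_record_length() knew how to produce this estimate, so join_init_cache() could pick up uninitialized used_fields/used_blobs values; after it, both call sites share calc_used_field_length().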