summary refs log tree commit diff
path: root/sql
diff options
context:
space:
mode:
author    monty@hundin.mysql.fi <>    2001-11-27 02:50:20 +0200
committer monty@hundin.mysql.fi <>    2001-11-27 02:50:20 +0200
commit    66a31433a4f96e946e86575b68432f1d5e28510c (patch)
tree      390d698047d7a779d20d1f8db12dc8a715816aee /sql
parent    50b43f85ef6ae6d01e48a0c4b57df20299393618 (diff)
download  mariadb-git-66a31433a4f96e946e86575b68432f1d5e28510c.tar.gz
Fixed bug when joining with caching.
Fixed a race condition when using the binary log together with INSERT DELAYED, which could cause the binary log to contain rows that were not yet written to MyISAM tables.
Diffstat (limited to 'sql')
-rw-r--r-- sql/sql_insert.cc | 15
-rw-r--r-- sql/sql_select.cc | 84
2 files changed, 57 insertions(+), 42 deletions(-)
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index f7ff3ed159c..d8861390d87 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1099,7 +1099,7 @@ bool delayed_insert::handle_inserts(void)
{
int error;
uint max_rows;
- bool using_ignore=0;
+ bool using_ignore=0, using_bin_log=mysql_bin_log.is_open();
delayed_row *row;
DBUG_ENTER("handle_inserts");
@@ -1124,7 +1124,13 @@ bool delayed_insert::handle_inserts(void)
max_rows= ~0; // Do as much as possible
}
- table->file->extra(HA_EXTRA_WRITE_CACHE);
+ /*
+ We can't use row caching when using the binary log because if
+ we get a crash, then binary log will contain rows that are not yet
+ written to disk, which will cause problems in replication.
+ */
+ if (!using_bin_log)
+ table->file->extra(HA_EXTRA_WRITE_CACHE);
pthread_mutex_lock(&mutex);
while ((row=rows.get()))
{
@@ -1161,7 +1167,7 @@ bool delayed_insert::handle_inserts(void)
if (row->query && row->log_query)
{
mysql_update_log.write(&thd,row->query, row->query_length);
- if (mysql_bin_log.is_open())
+ if (using_bin_log)
{
thd.query_length = row->query_length;
Query_log_event qinfo(&thd, row->query);
@@ -1197,7 +1203,8 @@ bool delayed_insert::handle_inserts(void)
/* This should never happen */
sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->real_name);
}
- table->file->extra(HA_EXTRA_WRITE_CACHE);
+ if (!using_bin_log)
+ table->file->extra(HA_EXTRA_WRITE_CACHE);
pthread_mutex_lock(&mutex);
thd.proc_info="insert";
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 777a10503f6..774be3679a2 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1881,52 +1881,55 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
** Find how much space the prevous read not const tables takes in cache
*/
+static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
+{
+ uint null_fields,blobs,fields,rec_length;
+ null_fields=blobs=fields=rec_length=0;
+
+ Field **f_ptr,*field;
+ for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
+ {
+ if (field->query_id == thd->query_id)
+ {
+ uint flags=field->flags;
+ fields++;
+ rec_length+=field->pack_length();
+ if (flags & BLOB_FLAG)
+ blobs++;
+ if (!(flags & NOT_NULL_FLAG))
+ null_fields++;
+ }
+ }
+ if (null_fields)
+ rec_length+=(join_tab->table->null_fields+7)/8;
+ if (join_tab->table->maybe_null)
+ rec_length+=sizeof(my_bool);
+ if (blobs)
+ {
+ uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
+ (join_tab->table->reclength- rec_length));
+ rec_length+=(uint) max(4,blob_length);
+ }
+ join_tab->used_fields=fields;
+ join_tab->used_fieldlength=rec_length;
+ join_tab->used_blobs=blobs;
+}
+
+
static uint
cache_record_length(JOIN *join,uint idx)
{
- uint length;
+ uint length=0;
JOIN_TAB **pos,**end;
THD *thd=join->thd;
- length=0;
for (pos=join->best_ref+join->const_tables,end=join->best_ref+idx ;
pos != end ;
pos++)
{
JOIN_TAB *join_tab= *pos;
- if (!join_tab->used_fieldlength)
- { /* Not calced yet */
- uint null_fields,blobs,fields,rec_length;
- null_fields=blobs=fields=rec_length=0;
-
- Field **f_ptr,*field;
- for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
- {
- if (field->query_id == thd->query_id)
- {
- uint flags=field->flags;
- fields++;
- rec_length+=field->pack_length();
- if (flags & BLOB_FLAG)
- blobs++;
- if (!(flags & NOT_NULL_FLAG))
- null_fields++;
- }
- }
- if (null_fields)
- rec_length+=(join_tab->table->null_fields+7)/8;
- if (join_tab->table->maybe_null)
- rec_length+=sizeof(my_bool);
- if (blobs)
- {
- uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
- (join_tab->table->reclength- rec_length));
- rec_length+=(uint) max(4,blob_length);
- }
- join_tab->used_fields=fields;
- join_tab->used_fieldlength=rec_length;
- join_tab->used_blobs=blobs;
- }
+ if (!join_tab->used_fieldlength) /* Not calced yet */
+ calc_used_field_length(thd, join_tab);
length+=join_tab->used_fieldlength;
}
return length;
@@ -2248,6 +2251,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
used_tables|=current_map;
if (tab->type == JT_REF && tab->quick &&
+ tab->ref.key == tab->quick->index &&
tab->ref.key_length < tab->quick->max_used_key_length)
{
/* Range uses longer key; Use this instead of ref on key */
@@ -5631,15 +5635,19 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
uint length,blobs,size;
CACHE_FIELD *copy,**blob_ptr;
JOIN_CACHE *cache;
+ JOIN_TAB *join_tab;
DBUG_ENTER("join_init_cache");
cache= &tables[table_count].cache;
cache->fields=blobs=0;
- for (i=0 ; i < table_count ; i++)
+ join_tab=tables;
+ for (i=0 ; i < table_count ; i++,join_tab++)
{
- cache->fields+=tables[i].used_fields;
- blobs+=tables[i].used_blobs;
+ if (!join_tab->used_fieldlength) /* Not calced yet */
+ calc_used_field_length(thd, join_tab);
+ cache->fields+=join_tab->used_fields;
+ blobs+=join_tab->used_blobs;
}
if (!(cache->field=(CACHE_FIELD*)
sql_alloc(sizeof(CACHE_FIELD)*(cache->fields+table_count*2)+(blobs+1)*