Diffstat (limited to 'sql')
-rw-r--r--  sql/slave.cc      |  7
-rw-r--r--  sql/sql_base.cc   |  4
-rw-r--r--  sql/sql_insert.cc | 15
-rw-r--r--  sql/sql_select.cc | 84
4 files changed, 65 insertions(+), 45 deletions(-)
diff --git a/sql/slave.cc b/sql/slave.cc
index d6f0809c277..700838d7cd7 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1013,7 +1013,12 @@ static int exec_event(THD* thd, NET* net, MASTER_INFO* mi, int event_len)
mi->inc_pos(event_len, ev->log_seq);
flush_master_info(mi);
- if(slave_skip_counter)
+ if(slave_skip_counter && /* protect against common user error of
+                             setting the counter to 1 instead of 2
+                             while recovering from a failed
+                             auto-increment insert */
+    !(type_code == INTVAR_EVENT &&
+      slave_skip_counter == 1))
--slave_skip_counter;
delete ev;
return 0; // avoid infinite update loops
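
The guard added above can be read in isolation. The standalone sketch below (reduced, hypothetical names, not the server code) shows why a counter value of 1 is held back when the next event is an INTVAR_EVENT: the INTVAR event only supplies the auto-increment value for the query event that follows it, so both must be skipped together.

// Standalone sketch of the patched decrement rule (hypothetical names).
#include <cstdio>

enum DemoEventType { DEMO_QUERY_EVENT, DEMO_INTVAR_EVENT };

static int demo_skip_counter= 1;   // user issued SET SQL_SLAVE_SKIP_COUNTER=1

// Mirrors the patched condition: keep the counter at 1 across an INTVAR_EVENT
// so that the query event it belongs to is skipped as well.
static bool demo_should_decrement(DemoEventType type_code)
{
  return demo_skip_counter &&
         !(type_code == DEMO_INTVAR_EVENT && demo_skip_counter == 1);
}

int main()
{
  if (demo_should_decrement(DEMO_INTVAR_EVENT))
    --demo_skip_counter;
  std::printf("after INTVAR_EVENT: %d\n", demo_skip_counter);  // still 1
  if (demo_should_decrement(DEMO_QUERY_EVENT))
    --demo_skip_counter;
  std::printf("after QUERY_EVENT:  %d\n", demo_skip_counter);  // now 0
  return 0;
}
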
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 2c2c3f703b9..b913233806f 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -533,10 +533,10 @@ void close_temporary_tables(THD *thd)
if (query) // we might be out of memory, but this is not fatal
{
// skip temporary tables not created directly by the user
- if (table->table_name[0] != '#')
+ if (table->real_name[0] != '#')
{
end = strxmov(end,table->table_cache_key,".",
- table->table_name,",", NullS);
+ table->real_name,",", NullS);
// here we assume table_cache_key always starts
// with \0 terminated db name
found_user_tables = 1;
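
For context, the two fields differ in meaning: real_name holds the table's actual temporary-table name while table_name is the alias used in the query, and internally created temporary tables get names starting with '#'. A minimal sketch of the corrected check, using a stand-in struct rather than the server's TABLE:

// Stand-in for the relevant TABLE members (illustration only).
struct DemoTable
{
  const char *table_name;   // alias used in the statement
  const char *real_name;    // actual temporary-table name
};

// Only user-created temporary tables are listed for the replication log;
// internal ones (names beginning with '#') are skipped.
static bool demo_is_user_created(const DemoTable &t)
{
  return t.real_name[0] != '#';
}
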
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 150ed503078..70cc5f412a1 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1126,7 +1126,7 @@ bool delayed_insert::handle_inserts(void)
{
int error;
uint max_rows;
- bool using_ignore=0;
+ bool using_ignore=0, using_bin_log=mysql_bin_log.is_open();
delayed_row *row;
DBUG_ENTER("handle_inserts");
@@ -1151,7 +1151,13 @@ bool delayed_insert::handle_inserts(void)
max_rows= ~0; // Do as much as possible
}
- table->file->extra(HA_EXTRA_WRITE_CACHE);
+ /*
+ We can't use row caching when using the binary log because if
+ we get a crash, then binary log will contain rows that are not yet
+ written to disk, which will cause problems in replication.
+ */
+ if (!using_bin_log)
+ table->file->extra(HA_EXTRA_WRITE_CACHE);
pthread_mutex_lock(&mutex);
while ((row=rows.get()))
{
@@ -1188,7 +1194,7 @@ bool delayed_insert::handle_inserts(void)
if (row->query && row->log_query)
{
mysql_update_log.write(&thd,row->query, row->query_length);
- if (mysql_bin_log.is_open())
+ if (using_bin_log)
{
thd.query_length = row->query_length;
Query_log_event qinfo(&thd, row->query);
@@ -1224,7 +1230,8 @@ bool delayed_insert::handle_inserts(void)
/* This should never happen */
sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->real_name);
}
- table->file->extra(HA_EXTRA_WRITE_CACHE);
+ if (!using_bin_log)
+ table->file->extra(HA_EXTRA_WRITE_CACHE);
pthread_mutex_lock(&mutex);
thd.proc_info="insert";
}
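
A reduced sketch of the pattern this hunk applies (hypothetical names, not the handler API): the binary-log state is sampled once, and the row write cache is enabled only when the log is closed, so a crash cannot leave the binary log ahead of the table data.

// Illustration only: caching rows is safe only when nothing downstream
// (here, the binary log) already treats them as durable.
struct DemoHandler
{
  bool caching= false;
  void enable_write_cache() { caching= true; }    // rows buffered before flush
  void write_row(const char *row) { (void) row; }
};

static void demo_flush_delayed_rows(DemoHandler &h, bool binlog_open,
                                    const char *const *rows, unsigned count)
{
  if (!binlog_open)                 // mirrors the !using_bin_log guard above
    h.enable_write_cache();
  for (unsigned i= 0; i < count; i++)
    h.write_row(rows[i]);           // with binlogging, each row goes straight to disk
}
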
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index b40c98cbb52..08fbf753997 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1950,52 +1950,55 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
** Find how much space the previously read non-const tables take in the cache
*/
+static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
+{
+ uint null_fields,blobs,fields,rec_length;
+ null_fields=blobs=fields=rec_length=0;
+
+ Field **f_ptr,*field;
+ for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
+ {
+ if (field->query_id == thd->query_id)
+ {
+ uint flags=field->flags;
+ fields++;
+ rec_length+=field->pack_length();
+ if (flags & BLOB_FLAG)
+ blobs++;
+ if (!(flags & NOT_NULL_FLAG))
+ null_fields++;
+ }
+ }
+ if (null_fields)
+ rec_length+=(join_tab->table->null_fields+7)/8;
+ if (join_tab->table->maybe_null)
+ rec_length+=sizeof(my_bool);
+ if (blobs)
+ {
+ uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
+ (join_tab->table->reclength- rec_length));
+ rec_length+=(uint) max(4,blob_length);
+ }
+ join_tab->used_fields=fields;
+ join_tab->used_fieldlength=rec_length;
+ join_tab->used_blobs=blobs;
+}
+
+
static uint
cache_record_length(JOIN *join,uint idx)
{
- uint length;
+ uint length=0;
JOIN_TAB **pos,**end;
THD *thd=join->thd;
- length=0;
for (pos=join->best_ref+join->const_tables,end=join->best_ref+idx ;
pos != end ;
pos++)
{
JOIN_TAB *join_tab= *pos;
- if (!join_tab->used_fieldlength)
- { /* Not calced yet */
- uint null_fields,blobs,fields,rec_length;
- null_fields=blobs=fields=rec_length=0;
-
- Field **f_ptr,*field;
- for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
- {
- if (field->query_id == thd->query_id)
- {
- uint flags=field->flags;
- fields++;
- rec_length+=field->pack_length();
- if (flags & BLOB_FLAG)
- blobs++;
- if (!(flags & NOT_NULL_FLAG))
- null_fields++;
- }
- }
- if (null_fields)
- rec_length+=(join_tab->table->null_fields+7)/8;
- if (join_tab->table->maybe_null)
- rec_length+=sizeof(my_bool);
- if (blobs)
- {
- uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
- (join_tab->table->reclength- rec_length));
- rec_length+=(uint) max(4,blob_length);
- }
- join_tab->used_fields=fields;
- join_tab->used_fieldlength=rec_length;
- join_tab->used_blobs=blobs;
- }
+ if (!join_tab->used_fieldlength) /* Not calced yet */
+ calc_used_field_length(thd, join_tab);
length+=join_tab->used_fieldlength;
}
return length;
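
The estimate that the new calc_used_field_length() produces can be followed with concrete numbers; the arithmetic below is a standalone worked example with made-up column sizes, not server code.

// Worked example: three referenced columns with pack lengths 4, 20 and 9,
// one of them nullable and one a blob; the table has 5 nullable columns,
// a fixed record length of 60 and a mean on-disk row length of 120.
#include <algorithm>
#include <cstdio>

int main()
{
  unsigned rec_length= 4 + 20 + 9;                 // sum of pack lengths = 33
  unsigned used_null_fields= 1, used_blobs= 1;
  unsigned table_null_fields= 5, table_reclength= 60, mean_rec_length= 120;

  if (used_null_fields)                            // null bitmap for the table
    rec_length+= (table_null_fields + 7) / 8;      // 33 + 1 = 34

  if (used_blobs)
  {
    // blob payload estimated as the average row minus the non-blob part
    unsigned blob_length= mean_rec_length - (table_reclength - rec_length);
    rec_length+= std::max(4u, blob_length);        // 34 + 94 = 128
  }
  std::printf("estimated cached record length: %u\n", rec_length);
  return 0;
}
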
@@ -2319,6 +2322,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
used_tables|=current_map;
if (tab->type == JT_REF && tab->quick &&
+ tab->ref.key == tab->quick->index &&
tab->ref.key_length < tab->quick->max_used_key_length)
{
/* Range uses longer key; Use this instead of ref on key */
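
The extra condition added to this test can be restated in isolation (hedged sketch with stand-in structs): a range ("quick") scan should only replace ref access when both plans use the same index; comparing key lengths across different indexes says nothing useful.

// Stand-in structs, illustration only.
struct DemoRef   { unsigned key;   unsigned key_length; };
struct DemoQuick { unsigned index; unsigned max_used_key_length; };

// Mirrors the patched test: same index on both plans, and the range scan
// uses a longer key prefix than the ref lookup.
static bool demo_prefer_range_over_ref(const DemoRef &ref, const DemoQuick &quick)
{
  return ref.key == quick.index &&
         ref.key_length < quick.max_used_key_length;
}
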
@@ -5781,15 +5785,19 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
uint length,blobs,size;
CACHE_FIELD *copy,**blob_ptr;
JOIN_CACHE *cache;
+ JOIN_TAB *join_tab;
DBUG_ENTER("join_init_cache");
cache= &tables[table_count].cache;
cache->fields=blobs=0;
- for (i=0 ; i < table_count ; i++)
+ join_tab=tables;
+ for (i=0 ; i < table_count ; i++,join_tab++)
{
- cache->fields+=tables[i].used_fields;
- blobs+=tables[i].used_blobs;
+ if (!join_tab->used_fieldlength) /* Not calced yet */
+ calc_used_field_length(thd, join_tab);
+ cache->fields+=join_tab->used_fields;
+ blobs+=join_tab->used_blobs;
}
if (!(cache->field=(CACHE_FIELD*)
sql_alloc(sizeof(CACHE_FIELD)*(cache->fields+table_count*2)+(blobs+1)*