summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJacob Mathew <jacob.mathew@mariadb.com>2017-09-20 15:05:34 -0700
committerJacob Mathew <jacob.mathew@mariadb.com>2017-09-29 17:58:56 -0700
commitcbfc2e93975125c4289f2f3cc8d450c004ac2353 (patch)
tree712231afca52e606726c33872e34907fcabf69b3
parent6387a4eef4f9b6732b8309c0cc3b5b443988f093 (diff)
downloadmariadb-git-bb-10.2.4-spider-extra-jacob.tar.gz
Adding direct update/delete to the server and to the partition engine.bb-10.2.4-spider-extra-jacob
Contains Spiral patches: - Spiral Patch 006: 006_mariadb-10.2.0.direct_update_rows.diff MDEV-7704 - Spiral Patch 008: 008_mariadb-10.2.0.partition_direct_update.diff MDEV-7706 - Spiral Patch 010: 010_mariadb-10.2.0.direct_update_rows2.diff MDEV-7708 - Spiral Patch 011: 011_mariadb-10.2.0.aggregate.diff MDEV-7709 - Spiral Patch 027: 027_mariadb-10.2.0.force_bulk_update.diff MDEV-7724 - Spiral Patch 061: 061_mariadb-10.2.0.mariadb-10.1.8.diff MDEV-12870 - Add support for direct update and direct delete requests. A direct update/delete request handles all qualified rows in a single operation, rather than one row at a time. - This patch has the following differences compared to the original patch: - Most of the parameters of the new functions are unnecessary. The unnecessary parameters have been removed. - Changed bit positions for new handler flags upon consideration of handler flags not needed by other Spiral patches.
-rw-r--r--sql/ha_partition.cc850
-rw-r--r--sql/ha_partition.h21
-rw-r--r--sql/handler.cc129
-rw-r--r--sql/handler.h77
-rw-r--r--sql/opt_sum.cc11
-rw-r--r--sql/sql_delete.cc163
-rw-r--r--sql/sql_update.cc702
-rw-r--r--storage/spider/ha_spider.cc1
-rw-r--r--storage/spider/ha_spider.h101
-rw-r--r--storage/spider/mysql-test/spider/bg/r/direct_update.result8
-rw-r--r--storage/spider/mysql-test/spider/bg/r/direct_update_part.result8
-rw-r--r--storage/spider/mysql-test/spider/bg/r/spider_fixes.result1
-rw-r--r--storage/spider/mysql-test/spider/handler/r/direct_update.result8
-rw-r--r--storage/spider/mysql-test/spider/handler/r/direct_update_part.result8
-rw-r--r--storage/spider/mysql-test/spider/handler/r/spider_fixes.result1
-rw-r--r--storage/spider/mysql-test/spider/r/direct_update.result8
-rw-r--r--storage/spider/mysql-test/spider/r/direct_update_part.result8
-rw-r--r--storage/spider/mysql-test/spider/r/spider_fixes.result1
-rw-r--r--storage/spider/spd_conn.cc1
-rw-r--r--storage/spider/spd_copy_tables.cc1
-rw-r--r--storage/spider/spd_db_conn.cc1
-rw-r--r--storage/spider/spd_db_handlersocket.cc1
-rw-r--r--storage/spider/spd_db_mysql.cc1
-rw-r--r--storage/spider/spd_db_oracle.cc1
-rw-r--r--storage/spider/spd_direct_sql.cc1
-rw-r--r--storage/spider/spd_environ.h37
-rw-r--r--storage/spider/spd_i_s.cc1
-rw-r--r--storage/spider/spd_malloc.cc1
-rw-r--r--storage/spider/spd_param.cc1
-rw-r--r--storage/spider/spd_ping_table.cc1
-rw-r--r--storage/spider/spd_sys_table.cc1
-rw-r--r--storage/spider/spd_table.cc1
-rw-r--r--storage/spider/spd_trx.cc1
33 files changed, 1747 insertions, 410 deletions
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 1e500cf4f37..fb214b90e6e 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -11487,6 +11487,856 @@ void ha_partition::cond_pop()
/**
+ Perform bulk update preparation on each partition.
+
+ SYNOPSIS
+ start_bulk_update()
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+*/
+
+bool ha_partition::start_bulk_update()
+{
+ handler **file= m_file;
+ DBUG_ENTER("ha_partition::start_bulk_update");
+
+ if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
+ table->write_set))
+ DBUG_RETURN(TRUE);
+
+ do
+ {
+ if ((*file)->start_bulk_update())
+ DBUG_RETURN(TRUE);
+ } while (*(++file));
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ Perform bulk update execution on each partition. A bulk update allows
+ a handler to batch the updated rows instead of performing the updates
+ one row at a time.
+
+ SYNOPSIS
+ exec_bulk_update()
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+*/
+
+int ha_partition::exec_bulk_update(uint *dup_key_found)
+{
+ int error;
+ handler **file= m_file;
+ DBUG_ENTER("ha_partition::exec_bulk_update");
+
+ do
+ {
+ if ((error = (*file)->exec_bulk_update(dup_key_found)))
+ DBUG_RETURN(error);
+ } while (*(++file));
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Perform bulk update cleanup on each partition.
+
+ SYNOPSIS
+ end_bulk_update()
+
+ RETURN VALUE
+ NONE
+*/
+
+void ha_partition::end_bulk_update()
+{
+ handler **file= m_file;
+ DBUG_ENTER("ha_partition::end_bulk_update");
+
+ do
+ {
+ (*file)->end_bulk_update();
+ } while (*(++file));
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Add the row to the bulk update on the partition on which the row is stored.
+ A bulk update allows a handler to batch the updated rows instead of
+ performing the updates one row at a time.
+
+ SYNOPSIS
+ bulk_update_row()
+ old_data Old record
+ new_data New record
+ dup_key_found Number of duplicate keys found
+
+ RETURN VALUE
+ >1 Error
+ 1 Bulk update not used, normal operation used
+ 0 Bulk update used by handler
+*/
+
+int ha_partition::bulk_update_row(const uchar *old_data, uchar *new_data,
+ uint *dup_key_found)
+{
+ int error = 0;
+ uint32 part_id;
+ longlong func_value;
+ my_bitmap_map *old_map;
+ DBUG_ENTER("ha_partition::bulk_update_row");
+
+ old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ error= m_part_info->get_partition_id(m_part_info, &part_id,
+ &func_value);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ if (unlikely(error))
+ {
+ m_part_info->err_value= func_value;
+ goto end;
+ }
+
+ error = m_file[part_id]->ha_bulk_update_row(old_data, new_data,
+ dup_key_found);
+
+end:
+ DBUG_RETURN(error);
+}
+
+
+/**
+ Perform bulk delete preparation on each partition.
+
+ SYNOPSIS
+ start_bulk_delete()
+
+ RETURN VALUE
+ TRUE Error
+ FALSE Success
+*/
+
+bool ha_partition::start_bulk_delete()
+{
+ handler **file= m_file;
+ DBUG_ENTER("ha_partition::start_bulk_delete");
+
+ do
+ {
+ if ((*file)->start_bulk_delete())
+ DBUG_RETURN(TRUE);
+ } while (*(++file));
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ Perform bulk delete cleanup on each partition.
+
+ SYNOPSIS
+ end_bulk_delete()
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::end_bulk_delete()
+{
+ int error= 0;
+ handler **file= m_file;
+ DBUG_ENTER("ha_partition::end_bulk_delete");
+
+ do
+ {
+ int tmp;
+ if ((tmp= (*file)->end_bulk_delete()))
+ error= tmp;
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+
+/**
+ Perform initialization for a direct update request.
+
+ SYNOPSIS
+ direct_update_rows_init()
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::direct_update_rows_init()
+{
+ int error;
+ uint i, j;
+ handler *file;
+ DBUG_ENTER("ha_partition::direct_update_rows_init");
+
+ if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
+ table->write_set)
+#if defined(HS_HAS_SQLCOM)
+ &&
+ (thd_sql_command(ha_thd()) != SQLCOM_HS_UPDATE ||
+ check_hs_update_overlapping(&ranges->start_key))
+#endif
+ )
+ {
+ DBUG_PRINT("info", ("partition FALSE by updating part_key"));
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
+
+ m_part_spec.start_part= 0;
+ m_part_spec.end_part= m_tot_parts - 1;
+ m_direct_update_part_spec = m_part_spec;
+
+ j = 0;
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ if (bitmap_is_set(&(m_part_info->read_partitions), i) &&
+ bitmap_is_set(&(m_part_info->lock_partitions), i))
+ {
+ file = m_file[i];
+ if ((error= file->ha_direct_update_rows_init()))
+ {
+ DBUG_PRINT("info", ("partition FALSE by storage engine"));
+ DBUG_RETURN(error);
+ }
+ j++;
+ }
+ }
+
+ TABLE_LIST *table_list= table->pos_in_table_list;
+ if (j != 1 && table_list)
+ {
+ while (table_list->parent_l)
+ table_list = table_list->parent_l;
+ st_select_lex *select_lex= table_list->select_lex;
+ DBUG_PRINT("info", ("partition select_lex=%p", select_lex));
+ if (select_lex && select_lex->explicit_limit)
+ {
+ DBUG_PRINT("info", ("partition explicit_limit=TRUE"));
+ DBUG_PRINT("info", ("partition offset_limit=%p",
+ select_lex->offset_limit));
+ DBUG_PRINT("info", ("partition select_limit=%p",
+ select_lex->select_limit));
+ DBUG_PRINT("info", ("partition FALSE by select_lex"));
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
+ }
+ DBUG_PRINT("info", ("partition OK"));
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Do initialization for performing parallel direct update
+ for a handlersocket update request.
+
+ SYNOPSIS
+ pre_direct_update_rows_init()
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::pre_direct_update_rows_init()
+{
+ int error;
+ uint i, j;
+ handler *file;
+ TABLE_LIST *table_list= table->pos_in_table_list;
+ DBUG_ENTER("ha_partition::pre_direct_update_rows_init");
+
+ if (bitmap_is_overlapping(&m_part_info->full_part_field_set,
+ table->write_set)
+#if defined(HS_HAS_SQLCOM)
+ &&
+ (thd_sql_command(ha_thd()) != SQLCOM_HS_UPDATE ||
+ check_hs_update_overlapping(&ranges->start_key))
+#endif
+ )
+ {
+ DBUG_PRINT("info", ("partition FALSE by updating part_key"));
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
+
+ m_part_spec.start_part= 0;
+ m_part_spec.end_part= m_tot_parts - 1;
+ m_direct_update_part_spec = m_part_spec;
+
+ j = 0;
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ if (bitmap_is_set(&(m_part_info->read_partitions), i) &&
+ bitmap_is_set(&(m_part_info->lock_partitions), i))
+ {
+ file = m_file[i];
+ if ((error= file->ha_pre_direct_update_rows_init()))
+ {
+ DBUG_PRINT("info", ("partition FALSE by storage engine"));
+ DBUG_RETURN(error);
+ }
+ j++;
+ }
+ }
+
+ if (j != 1 && table_list)
+ {
+ while (table_list->parent_l)
+ table_list = table_list->parent_l;
+ st_select_lex *select_lex= table_list->select_lex;
+ DBUG_PRINT("info", ("partition select_lex=%p", select_lex));
+ if (select_lex && select_lex->explicit_limit)
+ {
+ DBUG_PRINT("info", ("partition explicit_limit=TRUE"));
+ DBUG_PRINT("info", ("partition offset_limit=%p",
+ select_lex->offset_limit));
+ DBUG_PRINT("info", ("partition select_limit=%p",
+ select_lex->select_limit));
+ DBUG_PRINT("info", ("partition FALSE by select_lex"));
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
+ }
+ if (bulk_access_started)
+ bulk_access_info_current->called = TRUE;
+ DBUG_PRINT("info", ("partition OK"));
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Execute a direct update request. A direct update request updates all
+ qualified rows in a single operation, rather than one row at a time.
+ The direct update operation is pushed down to each individual
+ partition.
+
+ SYNOPSIS
+ direct_update_rows()
+ update_rows Number of updated rows
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::direct_update_rows(uint *update_rows)
+{
+ int error;
+ bool rnd_seq= FALSE;
+ uint m_update_rows, i;
+ handler *file;
+ DBUG_ENTER("ha_partition::direct_update_rows");
+
+ if (inited == RND && m_scan_value == 1)
+ {
+ rnd_seq= TRUE;
+ m_scan_value= 2;
+ }
+
+ *update_rows= 0;
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ file = m_file[i];
+ if (bitmap_is_set(&(m_part_info->read_partitions), i) &&
+ bitmap_is_set(&(m_part_info->lock_partitions), i))
+ {
+ if (rnd_seq && file->inited == NONE)
+ {
+ if ((error = file->ha_rnd_init(TRUE)))
+ DBUG_RETURN(error);
+ }
+ if ((error= (file)->ha_direct_update_rows(&m_update_rows)))
+ {
+ if (rnd_seq)
+ file->ha_rnd_end();
+ DBUG_RETURN(error);
+ }
+ *update_rows += m_update_rows;
+ }
+ if (rnd_seq && (error = file->ha_index_or_rnd_end()))
+ DBUG_RETURN(error);
+ }
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Start parallel execution of a direct update for a handlersocket update
+ request. A direct update request updates all qualified rows in a single
+ operation, rather than one row at a time. The direct update operation
+ is pushed down to each individual partition.
+
+ SYNOPSIS
+ pre_direct_update_rows()
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::pre_direct_update_rows()
+{
+ int error;
+ bool rnd_seq= FALSE;
+ uint m_update_rows, i;
+ handler *file;
+ DBUG_ENTER("ha_partition::pre_direct_update_rows");
+
+ if (pre_inited == RND && m_scan_value == 1)
+ {
+ rnd_seq= TRUE;
+ m_scan_value= 2;
+ }
+
+ m_part_spec = m_direct_update_part_spec;
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ file = m_file[i];
+ if (bitmap_is_set(&(m_part_info->read_partitions), i) &&
+ bitmap_is_set(&(m_part_info->lock_partitions), i))
+ {
+ if (rnd_seq && file->pre_inited == NONE)
+ {
+ if ((error = file->ha_pre_rnd_init(TRUE)))
+ DBUG_RETURN(error);
+ }
+ if ((error= (file)->ha_pre_direct_update_rows()))
+ {
+ if (rnd_seq)
+ file->ha_pre_rnd_end();
+ DBUG_RETURN(error);
+ }
+ bitmap_set_bit(&bulk_access_exec_bitmap, i);
+ }
+ if (rnd_seq && (error = file->ha_pre_index_or_rnd_end()))
+ DBUG_RETURN(error);
+ }
+ DBUG_RETURN(0);
+}
+
+
+#if defined(HS_HAS_SQLCOM)
+/**
+ Determine whether a key value being updated includes partition columns
+ when using handlersocket
+
+ SYNOPSIS
+ check_hs_update_overlapping()
+ key Key value
+
+ RETURN VALUE
+ TRUE The key value being updated does not include
+ partition columns
+ FALSE The key value being updated does include
+ partition columns
+*/
+
+bool ha_partition::check_hs_update_overlapping(key_range *key)
+{
+ Field *field;
+ uint store_length, length, var_len, field_index;
+ const uchar *ptr;
+ bool key_eq;
+ KEY *key_info;
+ KEY_PART_INFO *key_part;
+ key_part_map tgt_key_part_map = key->keypart_map;
+ char buf[MAX_FIELD_WIDTH], buf2[MAX_FIELD_WIDTH];
+ String tmp_str(buf, MAX_FIELD_WIDTH, &my_charset_bin), *str,
+ tmp_str2(buf2, MAX_FIELD_WIDTH, &my_charset_bin), *str2;
+ DBUG_ENTER("ha_partition::check_hs_update_overlapping");
+
+ key_info = &table->key_info[active_index];
+ for (key_part = key_info->key_part,
+ store_length = key_part->store_length, length = 0;
+ tgt_key_part_map;
+ length += store_length, tgt_key_part_map >>= 1, key_part++,
+ store_length = key_part->store_length)
+ {
+ field = key_part->field;
+ field_index = field->field_index;
+ if (bitmap_is_set(&m_part_info->full_part_field_set, field_index) &&
+ bitmap_is_set(table->write_set, field_index))
+ {
+ ptr = key->key + length;
+ key_eq = (tgt_key_part_map > 1);
+ if (key_part->null_bit && *ptr++)
+ {
+ if (key->flag != HA_READ_KEY_EXACT || !field->is_null())
+ {
+ DBUG_PRINT("info", ("spider flag=%u is_null=%s",
+ key->flag,
+ field->is_null() ? "TRUE" : "FALSE"));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else
+ {
+ if (field->type() == MYSQL_TYPE_BLOB ||
+ field->real_type() == MYSQL_TYPE_VARCHAR ||
+ field->type() == MYSQL_TYPE_GEOMETRY)
+ {
+ var_len = uint2korr(ptr);
+ tmp_str.set_quick((char *)ptr + HA_KEY_BLOB_LENGTH, var_len,
+ &my_charset_bin);
+ str = &tmp_str;
+ }
+ else
+ str = field->val_str(&tmp_str, ptr);
+ str2 = field->val_str(&tmp_str2);
+ if (str->length() != str2->length() ||
+ memcmp(str->ptr(), str2->ptr(), str->length()))
+ {
+ DBUG_PRINT("info", ("spider length=%u %u",
+ str->length(), str2->length()));
+ DBUG_PRINT("info", ("spider length=%s %s",
+ str->c_ptr_safe(), str2->c_ptr_safe()));
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ }
+
+ DBUG_PRINT("info", ("partition return FALSE"));
+ DBUG_RETURN(FALSE);
+}
+#endif
+
+
+/**
+ Perform initialization for a direct delete request.
+
+ SYNOPSIS
+ direct_delete_rows_init()
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::direct_delete_rows_init()
+{
+ int error;
+ uint i, j;
+ handler *file;
+ DBUG_ENTER("ha_partition::direct_delete_rows_init");
+
+ m_part_spec.start_part= 0;
+ m_part_spec.end_part= m_tot_parts - 1;
+ m_direct_update_part_spec = m_part_spec;
+
+ j = 0;
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ if (bitmap_is_set(&(m_part_info->read_partitions), i) &&
+ bitmap_is_set(&(m_part_info->lock_partitions), i))
+ {
+ file = m_file[i];
+ if ((error= file->ha_direct_delete_rows_init()))
+ {
+ DBUG_PRINT("info", ("partition FALSE by storage engine"));
+ DBUG_RETURN(error);
+ }
+ j++;
+ }
+ }
+
+ TABLE_LIST *table_list= table->pos_in_table_list;
+ if (j != 1 && table_list)
+ {
+ while (table_list->parent_l)
+ table_list = table_list->parent_l;
+ st_select_lex *select_lex= table_list->select_lex;
+ DBUG_PRINT("info", ("partition select_lex=%p", select_lex));
+ if (select_lex && select_lex->explicit_limit)
+ {
+ DBUG_PRINT("info", ("partition explicit_limit=TRUE"));
+ DBUG_PRINT("info", ("partition offset_limit=%p",
+ select_lex->offset_limit));
+ DBUG_PRINT("info", ("partition select_limit=%p",
+ select_lex->select_limit));
+ DBUG_PRINT("info", ("partition FALSE by select_lex"));
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
+ }
+ DBUG_PRINT("info", ("partition OK"));
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Do initialization for performing parallel direct delete
+ for a handlersocket delete request.
+
+ SYNOPSIS
+ pre_direct_delete_rows_init()
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::pre_direct_delete_rows_init()
+{
+ int error;
+ uint i, j;
+ handler *file;
+ TABLE_LIST *table_list= table->pos_in_table_list;
+ DBUG_ENTER("ha_partition::pre_direct_delete_rows_init");
+
+ m_part_spec.start_part= 0;
+ m_part_spec.end_part= m_tot_parts - 1;
+ m_direct_update_part_spec = m_part_spec;
+
+ j = 0;
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ if (bitmap_is_set(&(m_part_info->read_partitions), i) &&
+ bitmap_is_set(&(m_part_info->lock_partitions), i))
+ {
+ file = m_file[i];
+ if ((error= file->ha_pre_direct_delete_rows_init()))
+ {
+ DBUG_PRINT("info", ("partition FALSE by storage engine"));
+ DBUG_RETURN(error);
+ }
+ j++;
+ }
+ }
+
+ if (j != 1 && table_list)
+ {
+ while (table_list->parent_l)
+ table_list = table_list->parent_l;
+ st_select_lex *select_lex= table_list->select_lex;
+ DBUG_PRINT("info", ("partition select_lex=%p", select_lex));
+ if (select_lex && select_lex->explicit_limit)
+ {
+ DBUG_PRINT("info", ("partition explicit_limit=TRUE"));
+ DBUG_PRINT("info", ("partition offset_limit=%p",
+ select_lex->offset_limit));
+ DBUG_PRINT("info", ("partition select_limit=%p",
+ select_lex->select_limit));
+ DBUG_PRINT("info", ("partition FALSE by select_lex"));
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
+ }
+ if (bulk_access_started)
+ bulk_access_info_current->called = TRUE;
+ DBUG_PRINT("info", ("partition OK"));
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Execute a direct delete request. A direct delete request deletes all
+ qualified rows in a single operation, rather than one row at a time.
+ The direct delete operation is pushed down to each individual
+ partition.
+
+ SYNOPSIS
+ direct_delete_rows()
+ delete_rows Number of deleted rows
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::direct_delete_rows(uint *delete_rows)
+{
+ int error;
+ bool rnd_seq= FALSE;
+ uint m_delete_rows, i;
+ handler *file;
+ DBUG_ENTER("ha_partition::direct_delete_rows");
+
+ if (inited == RND && m_scan_value == 1)
+ {
+ rnd_seq= TRUE;
+ m_scan_value= 2;
+ }
+
+ *delete_rows= 0;
+ m_part_spec = m_direct_update_part_spec;
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ file = m_file[i];
+ if (bitmap_is_set(&(m_part_info->read_partitions), i) &&
+ bitmap_is_set(&(m_part_info->lock_partitions), i))
+ {
+ if (rnd_seq && file->inited == NONE)
+ {
+ if ((error = file->ha_rnd_init(TRUE)))
+ DBUG_RETURN(error);
+ }
+ if ((error= file->ha_direct_delete_rows(&m_delete_rows)))
+ {
+ if (rnd_seq)
+ file->ha_rnd_end();
+ DBUG_RETURN(error);
+ }
+ *delete_rows += m_delete_rows;
+ }
+ if (rnd_seq && (error = file->ha_index_or_rnd_end()))
+ DBUG_RETURN(error);
+ }
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Start parallel execution of a direct delete for a handlersocket delete
+ request. A direct delete request deletes all qualified rows in a single
+ operation, rather than one row at a time. The direct delete operation
+ is pushed down to each individual partition.
+
+ SYNOPSIS
+ pre_direct_delete_rows()
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::pre_direct_delete_rows()
+{
+ int error;
+ bool rnd_seq= FALSE;
+ uint m_delete_rows, i;
+ handler *file;
+ DBUG_ENTER("ha_partition::pre_direct_delete_rows");
+
+ if (pre_inited == RND && m_scan_value == 1)
+ {
+ rnd_seq= TRUE;
+ m_scan_value= 2;
+ }
+
+ m_part_spec = m_direct_update_part_spec;
+ for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ {
+ file = m_file[i];
+ if (bitmap_is_set(&(m_part_info->read_partitions), i) &&
+ bitmap_is_set(&(m_part_info->lock_partitions), i))
+ {
+ if (rnd_seq && file->pre_inited == NONE)
+ {
+ if ((error = file->ha_pre_rnd_init(TRUE)))
+ DBUG_RETURN(error);
+ }
+ if ((error= file->ha_pre_direct_delete_rows()))
+ {
+ if (rnd_seq)
+ file->ha_pre_rnd_end();
+ DBUG_RETURN(error);
+ }
+ bitmap_set_bit(&bulk_access_exec_bitmap, i);
+ }
+ if (rnd_seq && (error = file->ha_pre_index_or_rnd_end()))
+ DBUG_RETURN(error);
+ }
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Push metadata for the current operation down to each partition.
+
+ SYNOPSIS
+ info_push()
+
+ RETURN VALUE
+ >0 Error
+ 0 Success
+*/
+
+int ha_partition::info_push(uint info_type, void *info)
+{
+ int error= 0;
+ uint i;
+ handler **file= m_file;
+ DBUG_ENTER("ha_partition::info_push");
+
+ switch (info_type)
+ {
+ case INFO_KIND_BULK_ACCESS_BEGIN:
+ DBUG_PRINT("info", ("partition INFO_KIND_BULK_ACCESS_BEGIN"));
+ if (bulk_access_started)
+ {
+ if (!bulk_access_info_current->next)
+ {
+ if (!(bulk_access_info_current->next = create_bulk_access_info()))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ bulk_access_info_current->next->sequence_num =
+ bulk_access_info_current->sequence_num + 1;
+ }
+ bulk_access_info_current = bulk_access_info_current->next;
+ }
+ else
+ {
+ if (!bulk_access_info_first)
+ {
+ if (!(bulk_access_info_first = create_bulk_access_info()))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ bulk_access_info_first->sequence_num = 0;
+ }
+ bulk_access_info_current = bulk_access_info_first;
+ bulk_access_started = TRUE;
+ bulk_access_executing = FALSE;
+ }
+ bulk_access_info_current->used = TRUE;
+ bulk_access_info_current->called = FALSE;
+ *((void **)info) = bulk_access_info_current;
+ i = 0;
+ do
+ {
+ int tmp;
+ if ((tmp = (*file)->info_push(info_type,
+ &bulk_access_info_current->info[i])))
+ error= tmp;
+ ++i;
+ } while (*(++file));
+ DBUG_RETURN(error);
+ case INFO_KIND_BULK_ACCESS_CURRENT:
+ DBUG_PRINT("info", ("partition INFO_KIND_BULK_ACCESS_CURRENT"));
+ bulk_access_executing = TRUE;
+ bulk_access_info_exec_tgt = (PARTITION_BULK_ACCESS_INFO *)info;
+ i = 0;
+ do
+ {
+ int tmp;
+ if ((tmp = (*file)->info_push(info_type,
+ bulk_access_info_exec_tgt->info[i])))
+ error= tmp;
+ ++i;
+ } while (*(++file));
+ DBUG_RETURN(error);
+ case INFO_KIND_BULK_ACCESS_END:
+ DBUG_PRINT("info", ("partition INFO_KIND_BULK_ACCESS_END"));
+ bulk_access_started = FALSE;
+ break;
+ }
+
+ do
+ {
+ int tmp;
+ if ((tmp= (*file)->info_push(info_type, info)))
+ error= tmp;
+ } while (*(++file));
+ DBUG_RETURN(error);
+}
+
+
+/**
Check and set the partition bitmap for partitions involved
in an update operation.
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 833c7ba210a..32ca17c4902 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -43,6 +43,7 @@ struct st_partition_ft_info
};
+/* Bulk access request info */
typedef struct st_partition_bulk_access_info
{
uint sequence_num;
@@ -314,6 +315,7 @@ private:
ha_rows m_bulk_inserted_rows;
/** used for prediction of start_bulk_insert rows */
enum_monotonicity_info m_part_func_monotonicity_info;
+ part_id_range m_direct_update_part_spec;
bool m_pre_calling;
bool m_pre_call_use_parallel;
/* Keep track of bulk access requests */
@@ -552,8 +554,26 @@ public:
*/
virtual int pre_write_row(uchar *buf);
virtual int write_row(uchar * buf);
+ virtual bool start_bulk_update();
+ virtual int exec_bulk_update(uint *dup_key_found);
+ virtual void end_bulk_update();
+ virtual int bulk_update_row(const uchar *old_data, uchar *new_data,
+ uint *dup_key_found);
virtual int update_row(const uchar * old_data, uchar * new_data);
+ virtual int direct_update_rows_init();
+ virtual int pre_direct_update_rows_init();
+ virtual int direct_update_rows(uint *update_rows);
+ virtual int pre_direct_update_rows();
+#if defined(HS_HAS_SQLCOM)
+ virtual bool check_hs_update_overlapping(key_range *key);
+#endif
+ virtual bool start_bulk_delete();
+ virtual int end_bulk_delete();
virtual int delete_row(const uchar * buf);
+ virtual int direct_delete_rows_init();
+ virtual int pre_direct_delete_rows_init();
+ virtual int direct_delete_rows(uint *delete_rows);
+ virtual int pre_direct_delete_rows();
virtual int delete_all_rows(void);
virtual int truncate();
virtual void start_bulk_insert(ha_rows rows, uint flags);
@@ -1344,6 +1364,7 @@ public:
*/
virtual const COND *cond_push(const COND *cond);
virtual void cond_pop();
+ virtual int info_push(uint info_type, void *info);
virtual int set_top_table_and_fields(TABLE *top_table,
Field **top_table_field,
uint top_table_fields);
diff --git a/sql/handler.cc b/sql/handler.cc
index 695721a63e7..c27c26c13ab 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -6052,6 +6052,135 @@ int handler::ha_delete_row(const uchar *buf)
}
+/**
+ Perform initialization for a direct update request.
+
+ @retval 0 Successful initialization.
+ @retval != 0 Initialization failed or
+ direct update not supported.
+*/
+
+int handler::ha_direct_update_rows_init()
+{
+ int error;
+
+ error = direct_update_rows_init();
+ return error;
+}
+
+
+/**
+ Execute a direct update request. A direct update request updates all
+ qualified rows in a single operation, rather than one row at a time.
+ In a Spider cluster the direct update operation is pushed down to the
+ child levels of the cluster.
+
+ @param update_rows Number of updated rows.
+
+ @retval 0 Success.
+ @retval != 0 Failure.
+*/
+
+int handler::ha_direct_update_rows(uint *update_rows)
+{
+ int error;
+ DBUG_ASSERT(inited != NONE);
+
+ MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str);
+ mark_trx_read_write();
+
+ error = direct_update_rows(update_rows);
+ MYSQL_UPDATE_ROW_DONE(error);
+ return error;
+}
+
+
+/**
+ Log to the binary log the changes for a single row
+ in a direct update request.
+
+ @retval 0 Success.
+ @retval != 0 Failure.
+*/
+
+int handler::ha_direct_update_row_binlog(const uchar *old_data,
+ uchar *new_data)
+{
+ int error;
+ Log_func *log_func= Update_rows_log_event::binlog_row_logging_function;
+
+ /*
+ Some storage engines require that the new record is in record[0]
+ (and the old record is in record[1]).
+ */
+ DBUG_ASSERT(new_data == table->record[0]);
+
+ error= binlog_log_row(table, old_data, new_data, log_func);
+ return error;
+}
+
+
+/**
+ Perform initialization for a direct delete request.
+
+ @retval 0 Successful initialization.
+ @retval != 0 Initialization failed or
+ direct delete not supported.
+*/
+
+int handler::ha_direct_delete_rows_init()
+{
+ int error;
+
+ error = direct_delete_rows_init();
+ return error;
+}
+
+
+/**
+ Execute a direct delete request. A direct delete request deletes all
+ qualified rows in a single operation, rather than one row at a time.
+ In a Spider cluster the direct delete operation is pushed down to the
+ child levels of the cluster.
+
+ @param delete_rows Number of deleted rows.
+
+ @retval 0 Success.
+ @retval != 0 Failure.
+*/
+
+int handler::ha_direct_delete_rows(uint *delete_rows)
+{
+ int error;
+ DBUG_ASSERT(inited != NONE);
+
+ MYSQL_DELETE_ROW_START(table_share->db.str, table_share->table_name.str);
+ mark_trx_read_write();
+
+ error = direct_delete_rows(delete_rows);
+ MYSQL_DELETE_ROW_DONE(error);
+ return error;
+}
+
+
+/**
+ Log to the binary log the deletion of a single row
+ in a direct delete request.
+
+ @retval 0 Success.
+ @retval != 0 Failure.
+*/
+
+int handler::ha_direct_delete_row_binlog(const uchar *buf)
+{
+ int error;
+ Log_func *log_func= Delete_rows_log_event::binlog_row_logging_function;
+
+ error= binlog_log_row(table, buf, 0, log_func);
+ return 0;
+}
+
+
/** @brief
use_hidden_primary_key() is called in case of an update/delete when
diff --git a/sql/handler.h b/sql/handler.h
index d98365915a7..f47fb1b2e63 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -43,6 +43,10 @@
#include <keycache.h>
#include <mysql/psi/mysql_table.h>
+#define INFO_KIND_UPDATE_FIELDS 101
+#define INFO_KIND_UPDATE_VALUES 102
+#define INFO_KIND_FORCE_LIMIT_BEGIN 103
+#define INFO_KIND_FORCE_LIMIT_END 104
#define INFO_KIND_BULK_ACCESS_BEGIN 105
#define INFO_KIND_BULK_ACCESS_CURRENT 106
#define INFO_KIND_BULK_ACCESS_END 107
@@ -275,6 +279,10 @@ enum enum_alter_inplace_result {
#define HA_CAN_MULTISTEP_MERGE (1LL << 47)
#define HA_CAN_BULK_ACCESS (1LL << 48)
+/* The following are used by Spider */
+#define HA_CAN_FORCE_BULK_UPDATE (1LL << 49)
+#define HA_CAN_FORCE_BULK_DELETE (1LL << 50)
+
/* bits in index_flags(index_number) for what you can do with index */
#define HA_READ_NEXT 1 /* TODO really use this flag */
#define HA_READ_PREV 2 /* supports ::index_prev */
@@ -2917,6 +2925,29 @@ public:
int ha_pre_write_row(uchar * buf) { return pre_write_row(buf); }
int ha_update_row(const uchar * old_data, uchar * new_data);
int ha_delete_row(const uchar * buf);
+ int ha_direct_update_rows_init();
+ int ha_pre_direct_update_rows_init()
+ {
+ return pre_direct_update_rows_init();
+ }
+ int ha_direct_update_rows(uint *update_rows);
+ int ha_pre_direct_update_rows()
+ {
+ return pre_direct_update_rows();
+ }
+ int ha_direct_update_row_binlog(const uchar *old_data,
+ uchar *new_data);
+ int ha_direct_delete_rows_init();
+ int ha_pre_direct_delete_rows_init()
+ {
+ return pre_direct_delete_rows_init();
+ }
+ int ha_direct_delete_rows(uint *delete_rows);
+ int ha_pre_direct_delete_rows()
+ {
+ return pre_direct_delete_rows();
+ }
+ int ha_direct_delete_row_binlog(const uchar *buf);
virtual void bulk_req_exec() {}
void ha_release_auto_increment();
@@ -3671,6 +3702,11 @@ public:
virtual void cond_pop() { return; };
/**
+ Push metadata for the current operation down to the table handler.
+ */
+ virtual int info_push(uint info_type, void *info) { return 0; };
+
+ /**
This function is used to get correlating of a parent (table/column)
and children (table/column). When conditions are pushed down to child
table (like child of myisam_merge), child table needs to know about
@@ -4102,6 +4138,47 @@ private:
{
return HA_ERR_WRONG_COMMAND;
}
+
+ virtual int direct_update_rows_init()
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
+ virtual int pre_direct_update_rows_init()
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
+ virtual int direct_update_rows(uint *update_rows __attribute__((unused)))
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
+ virtual int pre_direct_update_rows()
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
+ virtual int direct_delete_rows_init()
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
+ virtual int pre_direct_delete_rows_init()
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
+ virtual int direct_delete_rows(uint *delete_rows __attribute__((unused)))
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
+ virtual int pre_direct_delete_rows()
+ {
+ return HA_ERR_WRONG_COMMAND;
+ }
+
/**
Reset state of file to after 'open'.
This function is called after every statement for all tables used
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index ad3f5aed112..766838179dc 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -396,6 +396,8 @@ int opt_sum_query(THD *thd,
const_result= 0;
break;
}
+ longlong info_limit = 1;
+ table->file->info_push(INFO_KIND_FORCE_LIMIT_BEGIN, &info_limit);
if (!(error= table->file->ha_index_init((uint) ref.key, 1)))
error= (is_max ?
get_index_max_value(table, &ref, range_fl) :
@@ -405,10 +407,11 @@ int opt_sum_query(THD *thd,
/* Verify that the read tuple indeed matches the search key */
if (!error && reckey_in_range(is_max, &ref, item_field->field,
conds, range_fl, prefix_len))
- error= HA_ERR_KEY_NOT_FOUND;
- table->set_keyread(false);
- table->file->ha_index_end();
- if (error)
+ error= HA_ERR_KEY_NOT_FOUND;
+ table->set_keyread(false);
+ table->file->ha_index_end();
+ table->file->info_push(INFO_KIND_FORCE_LIMIT_END, NULL);
+ if (error)
{
if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE)
DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); // No rows matching WHERE
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 2f4c2d9a6f8..6537afbb4f4 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -241,6 +241,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
SELECT_LEX *select_lex= &thd->lex->select_lex;
killed_state killed_status= NOT_KILLED;
THD::enum_binlog_query_type query_type= THD::ROW_QUERY_TYPE;
+ bool has_triggers, binlog_is_row, do_direct_delete = FALSE;
bool with_select= !select_lex->item_list.is_empty();
Explain_delete *explain;
Delete_plan query_plan(thd->mem_root);
@@ -490,14 +491,33 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (!(select && select->quick))
status_var_increment(thd->status_var.delete_scan_count);
- if (query_plan.using_filesort)
+ has_triggers = (table->triggers && (table->triggers->has_delete_triggers()));
+ DBUG_PRINT("info", ("has_triggers = %s", has_triggers ? "TRUE" : "FALSE"));
+ binlog_is_row = (mysql_bin_log.is_open() &&
+ thd->is_current_stmt_binlog_format_row());
+ DBUG_PRINT("info", ("binlog_is_row = %s", binlog_is_row ? "TRUE" : "FALSE"));
+ if (!has_triggers && !binlog_is_row)
{
+ if (select && select->cond &&
+ (select->cond->used_tables() & table->map) &&
+ !table->file->pushed_cond)
+ {
+ if (!table->file->cond_push(select->cond))
+ table->file->pushed_cond = select->cond;
+ }
+
+ if (!table->file->ha_direct_delete_rows_init())
+ do_direct_delete = TRUE;
+ }
+ if (!do_direct_delete)
+ {
+ if (query_plan.using_filesort)
{
Filesort fsort(order, HA_POS_ERROR, true, select);
DBUG_ASSERT(query_plan.index == MAX_KEY);
- Filesort_tracker *fs_tracker=
+ Filesort_tracker *fs_tracker=
thd->lex->explain->get_upd_del_plan()->filesort_tracker;
if (!(file_sort= filesort(thd, table, &fsort, fs_tracker)))
@@ -536,94 +556,113 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
init_ftfuncs(thd, select_lex, 1);
THD_STAGE_INFO(thd, stage_updating);
- if (table->prepare_triggers_for_delete_stmt_or_event())
+ if (!do_direct_delete)
{
- will_batch= FALSE;
+ if (!(table->file->ha_table_flags() & HA_CAN_FORCE_BULK_DELETE) &&
+ table->prepare_triggers_for_delete_stmt_or_event())
+ will_batch= FALSE;
+ else
+ will_batch= !table->file->start_bulk_delete();
}
- else
- will_batch= !table->file->start_bulk_delete();
table->mark_columns_needed_for_delete();
- if (with_select)
+ if (do_direct_delete)
{
- if (result->send_result_set_metadata(select_lex->item_list,
- Protocol::SEND_NUM_ROWS |
- Protocol::SEND_EOF))
- goto cleanup;
+ /* Direct deleting is supported */
+ uint delete_rows = 0;
+ error = table->file->ha_direct_delete_rows(&delete_rows);
+ deleted = delete_rows;
+ if (!error)
+ error = -1;
}
-
- explain= (Explain_delete*)thd->lex->explain->get_upd_del_plan();
- explain->tracker.on_scan_init();
-
- while (!(error=info.read_record(&info)) && !thd->killed &&
- ! thd->is_error())
+ else
{
- explain->tracker.on_record_read();
- thd->inc_examined_row_count(1);
- if (table->vfield)
- (void) table->update_virtual_fields(VCOL_UPDATE_FOR_DELETE);
- if (!select || select->skip_record(thd) > 0)
+ /* Direct deleting is not supported */
+ if (with_select)
{
- explain->tracker.on_record_after_where();
- if (table->triggers &&
- table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
- TRG_ACTION_BEFORE, FALSE))
- {
- error= 1;
- break;
- }
+ if (result->send_result_set_metadata(select_lex->item_list,
+ Protocol::SEND_NUM_ROWS |
+ Protocol::SEND_EOF))
+ goto cleanup;
+ }
- if (with_select && result->send_data(select_lex->item_list) < 0)
- {
- error=1;
- break;
- }
+ explain= (Explain_delete*)thd->lex->explain->get_upd_del_plan();
+ explain->tracker.on_scan_init();
- if (!(error= table->file->ha_delete_row(table->record[0])))
+ while (!(error=info.read_record(&info)) && !thd->killed &&
+ !thd->is_error())
+ {
+ explain->tracker.on_record_read();
+ thd->inc_examined_row_count(1);
+ if (table->vfield)
+ (void) table->update_virtual_fields(VCOL_UPDATE_FOR_DELETE);
+ if (!select || select->skip_record(thd) > 0)
{
- deleted++;
+ explain->tracker.on_record_after_where();
if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
- TRG_ACTION_AFTER, FALSE))
+ TRG_ACTION_BEFORE, FALSE))
{
error= 1;
break;
}
- if (!--limit && using_limit)
- {
- error= -1;
- break;
- }
- }
- else
- {
- table->file->print_error(error,
- MYF(thd->lex->ignore ? ME_JUST_WARNING : 0));
- if (thd->is_error())
+
+ if (with_select && result->send_data(select_lex->item_list) < 0)
{
- error= 1;
+ error=1;
break;
}
+
+ if (!(error= table->file->ha_delete_row(table->record[0])))
+ {
+ deleted++;
+ if (table->triggers &&
+ table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
+ TRG_ACTION_AFTER, FALSE))
+ {
+ error= 1;
+ break;
+ }
+ if (!--limit && using_limit)
+ {
+ error= -1;
+ break;
+ }
+ }
+ else
+ {
+ table->file->print_error(error,
+ MYF(thd->lex->ignore ? ME_JUST_WARNING
+ : 0));
+ if (thd->is_error())
+ {
+ error= 1;
+ break;
+ }
+ }
}
+ /*
+ Don't try unlocking the row if skip_record reported an error since in
+ this case the transaction might have been rolled back already.
+ */
+ else if (!thd->is_error())
+ table->file->unlock_row(); // Row failed selection, release lock on it
+ else
+ break;
}
- /*
- Don't try unlocking the row if skip_record reported an error since in
- this case the transaction might have been rolled back already.
- */
- else if (!thd->is_error())
- table->file->unlock_row(); // Row failed selection, release lock on it
- else
- break;
}
killed_status= thd->killed;
if (killed_status != NOT_KILLED || thd->is_error())
error= 1; // Aborted
- if (will_batch && (loc_error= table->file->end_bulk_delete()))
+ if (!do_direct_delete)
{
- if (error != 1)
- table->file->print_error(loc_error,MYF(0));
- error=1;
+ if (will_batch && (loc_error= table->file->end_bulk_delete()))
+ {
+ if (error != 1)
+ table->file->print_error(loc_error, MYF(0));
+ error=1;
+ }
}
THD_STAGE_INFO(thd, stage_end);
end_read_record(&info);
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index c46b80f798b..7a5bd525834 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -276,6 +276,7 @@ int mysql_update(THD *thd,
ulonglong id;
List<Item> all_fields;
killed_state killed_status= NOT_KILLED;
+ bool has_triggers, binlog_is_row, do_direct_update = FALSE;
Update_plan query_plan(thd->mem_root);
Explain_update *explain;
query_plan.index= MAX_KEY;
@@ -526,164 +527,192 @@ int mysql_update(THD *thd,
DBUG_EXECUTE_IF("show_explain_probe_update_exec_start",
dbug_serve_apcs(thd, 1););
+ has_triggers = (table->triggers &&
+ (table->triggers->has_triggers(TRG_EVENT_UPDATE,
+ TRG_ACTION_BEFORE) ||
+ table->triggers->has_triggers(TRG_EVENT_UPDATE,
+ TRG_ACTION_AFTER)));
+ DBUG_PRINT("info", ("has_triggers = %s", has_triggers ? "TRUE" : "FALSE"));
+ binlog_is_row = (mysql_bin_log.is_open() &&
+ thd->is_current_stmt_binlog_format_row());
+ DBUG_PRINT("info", ("binlog_is_row = %s", binlog_is_row ? "TRUE" : "FALSE"));
+ if (!has_triggers && !binlog_is_row)
+ {
+ if (select && select->cond &&
+ (select->cond->used_tables() & table->map) &&
+ !table->file->pushed_cond)
+ {
+ if (!table->file->cond_push(select->cond))
+ table->file->pushed_cond = select->cond;
+ }
+
+ if (!table->file->info_push(INFO_KIND_UPDATE_FIELDS, &fields) &&
+ !table->file->info_push(INFO_KIND_UPDATE_VALUES, &values) &&
+ !table->file->ha_direct_update_rows_init())
+ do_direct_update = TRUE;
+ }
+
if (!(select && select->quick))
status_var_increment(thd->status_var.update_scan_count);
- if (query_plan.using_filesort || query_plan.using_io_buffer)
+ if (!do_direct_update)
{
- /*
- We can't update table directly; We must first search after all
- matching rows before updating the table!
- */
- MY_BITMAP *save_read_set= table->read_set;
- MY_BITMAP *save_write_set= table->write_set;
-
- if (query_plan.index < MAX_KEY && old_covering_keys.is_set(query_plan.index))
- table->add_read_columns_used_by_index(query_plan.index);
- else
- table->use_all_columns();
-
- /* note: We avoid sorting if we sort on the used index */
- if (query_plan.using_filesort)
+ if (query_plan.using_filesort || query_plan.using_io_buffer)
{
/*
- Doing an ORDER BY; Let filesort find and sort the rows we are going
- to update
- NOTE: filesort will call table->prepare_for_position()
+ We can't update table directly; We must first search after all
+ matching rows before updating the table!
*/
- Filesort fsort(order, limit, true, select);
+ MY_BITMAP *save_read_set= table->read_set;
+ MY_BITMAP *save_write_set= table->write_set;
- Filesort_tracker *fs_tracker=
- thd->lex->explain->get_upd_del_plan()->filesort_tracker;
+ if (query_plan.index < MAX_KEY && old_covering_keys.is_set(query_plan.index))
+ table->add_read_columns_used_by_index(query_plan.index);
+ else
+ table->use_all_columns();
- if (!(file_sort= filesort(thd, table, &fsort, fs_tracker)))
- goto err;
- thd->inc_examined_row_count(file_sort->examined_rows);
+ /* note: We avoid sorting if we sort on the used index */
+ if (query_plan.using_filesort)
+ {
+ /*
+ Doing an ORDER BY; Let filesort find and sort the rows we are going
+ to update
+ NOTE: filesort will call table->prepare_for_position()
+ */
+ Filesort fsort(order, limit, true, select);
- /*
- Filesort has already found and selected the rows we want to update,
- so we don't need the where clause
- */
- delete select;
- select= 0;
- }
- else
- {
- /*
- We are doing a search on a key that is updated. In this case
- we go trough the matching rows, save a pointer to them and
- update these in a separate loop based on the pointer.
- */
- explain->buf_tracker.on_scan_init();
- IO_CACHE tempfile;
- if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
- DISK_BUFFER_SIZE, MYF(MY_WME)))
- goto err;
+ Filesort_tracker *fs_tracker=
+ thd->lex->explain->get_upd_del_plan()->filesort_tracker;
- /* If quick select is used, initialize it before retrieving rows. */
- if (select && select->quick && select->quick->reset())
- {
- close_cached_file(&tempfile);
- goto err;
+ if (!(file_sort= filesort(thd, table, &fsort, fs_tracker)))
+ goto err;
+ thd->inc_examined_row_count(file_sort->examined_rows);
+
+ /*
+ Filesort has already found and selected the rows we want to update,
+ so we don't need the where clause
+ */
+ delete select;
+ select= 0;
}
+ else
+ {
+ /*
+ We are doing a search on a key that is updated. In this case
+ we go trough the matching rows, save a pointer to them and
+ update these in a separate loop based on the pointer.
+ */
+ explain->buf_tracker.on_scan_init();
+ IO_CACHE tempfile;
+ if (open_cached_file(&tempfile, mysql_tmpdir, TEMP_PREFIX,
+ DISK_BUFFER_SIZE, MYF(MY_WME)))
+ goto err;
- table->file->try_semi_consistent_read(1);
+ /* If quick select is used, initialize it before retrieving rows. */
+ if (select && select->quick && select->quick->reset())
+ {
+ close_cached_file(&tempfile);
+ goto err;
+ }
- /*
- When we get here, we have one of the following options:
- A. query_plan.index == MAX_KEY
- This means we should use full table scan, and start it with
- init_read_record call
- B. query_plan.index != MAX_KEY
- B.1 quick select is used, start the scan with init_read_record
- B.2 quick select is not used, this is full index scan (with LIMIT)
- Full index scan must be started with init_read_record_idx
- */
+ table->file->try_semi_consistent_read(1);
- if (query_plan.index == MAX_KEY || (select && select->quick))
- error= init_read_record(&info, thd, table, select, NULL, 0, 1, FALSE);
- else
- error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
- reverse);
+ /*
+ When we get here, we have one of the following options:
+ A. query_plan.index == MAX_KEY
+ This means we should use full table scan, and start it with
+ init_read_record call
+ B. query_plan.index != MAX_KEY
+ B.1 quick select is used, start the scan with init_read_record
+ B.2 quick select is not used, this is full index scan (with LIMIT)
+ Full index scan must be started with init_read_record_idx
+ */
- if (error)
- {
- close_cached_file(&tempfile);
- goto err;
- }
+ if (query_plan.index == MAX_KEY || (select && select->quick))
+ error= init_read_record(&info, thd, table, select, NULL, 0, 1, FALSE);
+ else
+ error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
+ reverse);
- THD_STAGE_INFO(thd, stage_searching_rows_for_update);
- ha_rows tmp_limit= limit;
+ if (error)
+ {
+ close_cached_file(&tempfile);
+ goto err;
+ }
- while (!(error=info.read_record(&info)) && !thd->killed)
- {
- explain->buf_tracker.on_record_read();
- thd->inc_examined_row_count(1);
- if (!select || (error= select->skip_record(thd)) > 0)
- {
- if (table->file->ha_was_semi_consistent_read())
- continue; /* repeat the read of the same row if it still exists */
-
- explain->buf_tracker.on_record_after_where();
- table->file->position(table->record[0]);
- if (my_b_write(&tempfile,table->file->ref,
- table->file->ref_length))
- {
- error=1; /* purecov: inspected */
- break; /* purecov: inspected */
- }
- if (!--limit && using_limit)
- {
- error= -1;
- break;
- }
- }
- else
+ THD_STAGE_INFO(thd, stage_searching_rows_for_update);
+ ha_rows tmp_limit= limit;
+
+ while (!(error=info.read_record(&info)) && !thd->killed)
{
- /*
- Don't try unlocking the row if skip_record reported an
- error since in this case the transaction might have been
- rolled back already.
- */
- if (error < 0)
+ explain->buf_tracker.on_record_read();
+ thd->inc_examined_row_count(1);
+ if (!select || (error= select->skip_record(thd)) > 0)
{
- /* Fatal error from select->skip_record() */
- error= 1;
- break;
+ if (table->file->ha_was_semi_consistent_read())
+ continue; /* repeat the read of the same row if it still exists */
+
+ explain->buf_tracker.on_record_after_where();
+ table->file->position(table->record[0]);
+ if (my_b_write(&tempfile, table->file->ref,
+ table->file->ref_length))
+ {
+ error=1; /* purecov: inspected */
+ break; /* purecov: inspected */
+ }
+ if (!--limit && using_limit)
+ {
+ error= -1;
+ break;
+ }
}
else
- table->file->unlock_row();
+ {
+ /*
+ Don't try unlocking the row if skip_record reported an
+ error since in this case the transaction might have been
+ rolled back already.
+ */
+ if (error < 0)
+ {
+ /* Fatal error from select->skip_record() */
+ error= 1;
+ break;
+ }
+ else
+ table->file->unlock_row();
+ }
+ }
+ if (thd->killed && !error)
+ error= 1; // Aborted
+ limit= tmp_limit;
+ table->file->try_semi_consistent_read(0);
+ end_read_record(&info);
+
+ /* Change select to use tempfile */
+ if (select)
+ {
+ delete select->quick;
+ if (select->free_cond)
+ delete select->cond;
+ select->quick=0;
+ select->cond=0;
+ }
+ else
+ {
+ select= new SQL_SELECT;
+ select->head=table;
}
- }
- if (thd->killed && !error)
- error= 1; // Aborted
- limit= tmp_limit;
- table->file->try_semi_consistent_read(0);
- end_read_record(&info);
-
- /* Change select to use tempfile */
- if (select)
- {
- delete select->quick;
- if (select->free_cond)
- delete select->cond;
- select->quick=0;
- select->cond=0;
- }
- else
- {
- select= new SQL_SELECT;
- select->head=table;
- }
- if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
- error=1; /* purecov: inspected */
- select->file=tempfile; // Read row ptrs from this file
- if (error >= 0)
- goto err;
+ if (reinit_io_cache(&tempfile, READ_CACHE, 0L, 0, 0))
+ error=1; /* purecov: inspected */
+ select->file=tempfile; // Read row ptrs from this file
+ if (error >= 0)
+ goto err;
+ }
+ table->set_keyread(false);
+ table->column_bitmaps_set(save_read_set, save_write_set);
}
- table->set_keyread(false);
- table->column_bitmaps_set(save_read_set, save_write_set);
}
if (ignore)
@@ -706,242 +735,255 @@ int mysql_update(THD *thd,
transactional_table= table->file->has_transactions();
thd->abort_on_warning= !ignore && thd->is_strict_mode();
- if (table->prepare_triggers_for_update_stmt_or_event())
+ if (do_direct_update)
{
- will_batch= FALSE;
+ /* Direct updating is supported */
+ uint update_rows = 0;
+ error = table->file->ha_direct_update_rows(&update_rows);
+ updated = update_rows;
+ found = update_rows;
+ if (!error)
+ error = -1;
}
else
- will_batch= !table->file->start_bulk_update();
+ {
+ /* Direct updating is not supported */
+ if (!(table->file->ha_table_flags() & HA_CAN_FORCE_BULK_UPDATE) &&
+ table->prepare_triggers_for_update_stmt_or_event())
+ will_batch= FALSE;
+ else
+ will_batch= !table->file->start_bulk_update();
- /*
- Assure that we can use position()
- if we need to create an error message.
- */
- if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
- table->prepare_for_position();
+ /*
+ Assure that we can use position()
+ if we need to create an error message.
+ */
+ if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
+ table->prepare_for_position();
- table->reset_default_fields();
+ table->reset_default_fields();
- /*
- We can use compare_record() to optimize away updates if
- the table handler is returning all columns OR if
- if all updated columns are read
- */
- can_compare_record= records_are_comparable(table);
- explain->tracker.on_scan_init();
+ /*
+ We can use compare_record() to optimize away updates if
+ the table handler is returning all columns OR if
+ if all updated columns are read
+ */
+ can_compare_record= records_are_comparable(table);
+ explain->tracker.on_scan_init();
- while (!(error=info.read_record(&info)) && !thd->killed)
- {
- explain->tracker.on_record_read();
- thd->inc_examined_row_count(1);
- if (!select || select->skip_record(thd) > 0)
+ while (!(error=info.read_record(&info)) && !thd->killed)
{
- if (table->file->ha_was_semi_consistent_read())
- continue; /* repeat the read of the same row if it still exists */
+ explain->tracker.on_record_read();
+ thd->inc_examined_row_count(1);
+ if (!select || select->skip_record(thd) > 0)
+ {
+ if (table->file->ha_was_semi_consistent_read())
+ continue; /* repeat the read of the same row if it still exists */
- explain->tracker.on_record_after_where();
- store_record(table,record[1]);
- if (fill_record_n_invoke_before_triggers(thd, table, fields, values, 0,
- TRG_EVENT_UPDATE))
- break; /* purecov: inspected */
+ explain->tracker.on_record_after_where();
+ store_record(table, record[1]);
+ if (fill_record_n_invoke_before_triggers(thd, table, fields, values, 0,
+ TRG_EVENT_UPDATE))
+ break; /* purecov: inspected */
- found++;
+ found++;
- if (!can_compare_record || compare_record(table))
- {
- if (table->default_field && table->update_default_fields(1, ignore))
+ if (!can_compare_record || compare_record(table))
{
- error= 1;
- break;
- }
- if ((res= table_list->view_check_option(thd, ignore)) !=
- VIEW_CHECK_OK)
- {
- found--;
- if (res == VIEW_CHECK_SKIP)
- continue;
- else if (res == VIEW_CHECK_ERROR)
+ if (table->default_field && table->update_default_fields(1, ignore))
{
error= 1;
break;
}
- }
- if (will_batch)
- {
- /*
- Typically a batched handler can execute the batched jobs when:
- 1) When specifically told to do so
- 2) When it is not a good idea to batch anymore
- 3) When it is necessary to send batch for other reasons
- (One such reason is when READ's must be performed)
-
- 1) is covered by exec_bulk_update calls.
- 2) and 3) is handled by the bulk_update_row method.
-
- bulk_update_row can execute the updates including the one
- defined in the bulk_update_row or not including the row
- in the call. This is up to the handler implementation and can
- vary from call to call.
-
- The dup_key_found reports the number of duplicate keys found
- in those updates actually executed. It only reports those if
- the extra call with HA_EXTRA_IGNORE_DUP_KEY have been issued.
- If this hasn't been issued it returns an error code and can
- ignore this number. Thus any handler that implements batching
- for UPDATE IGNORE must also handle this extra call properly.
-
- If a duplicate key is found on the record included in this
- call then it should be included in the count of dup_key_found
- and error should be set to 0 (only if these errors are ignored).
- */
- error= table->file->ha_bulk_update_row(table->record[1],
- table->record[0],
- &dup_key_found);
- limit+= dup_key_found;
- updated-= dup_key_found;
- }
- else
- {
- /* Non-batched update */
- error= table->file->ha_update_row(table->record[1],
- table->record[0]);
- }
- if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
- {
- if (error != HA_ERR_RECORD_IS_THE_SAME)
- updated++;
+ if ((res= table_list->view_check_option(thd, ignore)) !=
+ VIEW_CHECK_OK)
+ {
+ found--;
+ if (res == VIEW_CHECK_SKIP)
+ continue;
+ else if (res == VIEW_CHECK_ERROR)
+ {
+ error= 1;
+ break;
+ }
+ }
+ if (will_batch)
+ {
+ /*
+ Typically a batched handler can execute the batched jobs when:
+ 1) When specifically told to do so
+ 2) When it is not a good idea to batch anymore
+ 3) When it is necessary to send batch for other reasons
+ (One such reason is when READ's must be performed)
+
+ 1) is covered by exec_bulk_update calls.
+ 2) and 3) is handled by the bulk_update_row method.
+
+ bulk_update_row can execute the updates including the one
+ defined in the bulk_update_row or not including the row
+ in the call. This is up to the handler implementation and can
+ vary from call to call.
+
+ The dup_key_found reports the number of duplicate keys found
+ in those updates actually executed. It only reports those if
+ the extra call with HA_EXTRA_IGNORE_DUP_KEY have been issued.
+ If this hasn't been issued it returns an error code and can
+ ignore this number. Thus any handler that implements batching
+ for UPDATE IGNORE must also handle this extra call properly.
+
+ If a duplicate key is found on the record included in this
+ call then it should be included in the count of dup_key_found
+ and error should be set to 0 (only if these errors are ignored).
+ */
+ error= table->file->ha_bulk_update_row(table->record[1],
+ table->record[0],
+ &dup_key_found);
+ limit+= dup_key_found;
+ updated-= dup_key_found;
+ }
else
- error= 0;
- }
- else if (!ignore ||
- table->file->is_fatal_error(error, HA_CHECK_ALL))
- {
- /*
- If (ignore && error is ignorable) we don't have to
- do anything; otherwise...
- */
- myf flags= 0;
+ {
+ /* Non-batched update */
+ error= table->file->ha_update_row(table->record[1],
+ table->record[0]);
+ }
+ if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
+ {
+ if (error != HA_ERR_RECORD_IS_THE_SAME)
+ updated++;
+ else
+ error= 0;
+ }
+ else if (!ignore ||
+ table->file->is_fatal_error(error, HA_CHECK_ALL))
+ {
+ /*
+ If (ignore && error is ignorable) we don't have to
+ do anything; otherwise...
+ */
+ myf flags= 0;
- if (table->file->is_fatal_error(error, HA_CHECK_ALL))
- flags|= ME_FATALERROR; /* Other handler errors are fatal */
+ if (table->file->is_fatal_error(error, HA_CHECK_ALL))
+ flags|= ME_FATALERROR; /* Other handler errors are fatal */
- prepare_record_for_error_message(error, table);
- table->file->print_error(error,MYF(flags));
- error= 1;
- break;
- }
- }
+ prepare_record_for_error_message(error, table);
+ table->file->print_error(error, MYF(flags));
+ error= 1;
+ break;
+ }
+ }
- if (table->triggers &&
+ if (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, TRUE))
- {
- error= 1;
- break;
- }
+ {
+ error= 1;
+ break;
+ }
- if (!--limit && using_limit)
- {
- /*
- We have reached end-of-file in most common situations where no
- batching has occurred and if batching was supposed to occur but
- no updates were made and finally when the batch execution was
- performed without error and without finding any duplicate keys.
- If the batched updates were performed with errors we need to
- check and if no error but duplicate key's found we need to
- continue since those are not counted for in limit.
- */
- if (will_batch &&
- ((error= table->file->exec_bulk_update(&dup_key_found)) ||
- dup_key_found))
+ if (!--limit && using_limit)
{
- if (error)
+ /*
+ We have reached end-of-file in most common situations where no
+ batching has occurred and if batching was supposed to occur but
+ no updates were made and finally when the batch execution was
+ performed without error and without finding any duplicate keys.
+ If the batched updates were performed with errors we need to
+ check and if no error but duplicate key's found we need to
+ continue since those are not counted for in limit.
+ */
+ if (will_batch &&
+ ((error= table->file->exec_bulk_update(&dup_key_found)) ||
+ dup_key_found))
{
- /* purecov: begin inspected */
+ if (error)
+ {
+ /* purecov: begin inspected */
+ /*
+ The handler should not report error of duplicate keys if they
+ are ignored. This is a requirement on batching handlers.
+ */
+ prepare_record_for_error_message(error, table);
+ table->file->print_error(error, MYF(0));
+ error= 1;
+ break;
+ /* purecov: end */
+ }
/*
- The handler should not report error of duplicate keys if they
- are ignored. This is a requirement on batching handlers.
+ Either an error was found and we are ignoring errors or there
+ were duplicate keys found. In both cases we need to correct
+ the counters and continue the loop.
*/
- prepare_record_for_error_message(error, table);
- table->file->print_error(error,MYF(0));
- error= 1;
+ limit= dup_key_found; //limit is 0 when we get here so need to +
+ updated-= dup_key_found;
+ }
+ else
+ {
+ error= -1; // Simulate end of file
break;
- /* purecov: end */
}
- /*
- Either an error was found and we are ignoring errors or there
- were duplicate keys found. In both cases we need to correct
- the counters and continue the loop.
- */
- limit= dup_key_found; //limit is 0 when we get here so need to +
- updated-= dup_key_found;
- }
- else
- {
- error= -1; // Simulate end of file
- break;
}
}
+ /*
+ Don't try unlocking the row if skip_record reported an error since in
+ this case the transaction might have been rolled back already.
+ */
+ else if (!thd->is_error())
+ table->file->unlock_row();
+ else
+ {
+ error= 1;
+ break;
+ }
+ thd->get_stmt_da()->inc_current_row_for_warning();
+ if (thd->is_error())
+ {
+ error= 1;
+ break;
+ }
}
+ ANALYZE_STOP_TRACKING(&explain->command_tracker);
+ table->auto_increment_field_not_null= FALSE;
+ dup_key_found= 0;
/*
- Don't try unlocking the row if skip_record reported an error since in
- this case the transaction might have been rolled back already.
+ Caching the killed status to pass as the arg to query event constuctor;
+ The cached value can not change whereas the killed status can
+ (externally) since this point and change of the latter won't affect
+ binlogging.
+ It's assumed that if an error was set in combination with an effective
+ killed status then the error is due to killing.
*/
- else if (!thd->is_error())
- table->file->unlock_row();
- else
+ killed_status= thd->killed; // get the status of the volatile
+ // simulated killing after the loop must be ineffective for binlogging
+ DBUG_EXECUTE_IF("simulate_kill_bug27571",
{
- error= 1;
- break;
- }
- thd->get_stmt_da()->inc_current_row_for_warning();
- if (thd->is_error())
+ thd->killed= KILL_QUERY;
+ };);
+ error= (killed_status == NOT_KILLED)? error : 1;
+
+ if (error &&
+ will_batch &&
+ (loc_error= table->file->exec_bulk_update(&dup_key_found)))
+ /*
+ An error has occurred when a batched update was performed and returned
+ an error indication. It cannot be an allowed duplicate key error since
+ we require the batching handler to treat this as a normal behavior.
+
+ Otherwise we simply remove the number of duplicate keys records found
+ in the batched update.
+ */
{
+ /* purecov: begin inspected */
+ prepare_record_for_error_message(loc_error, table);
+ table->file->print_error(loc_error, MYF(ME_FATALERROR));
error= 1;
- break;
+ /* purecov: end */
}
+ else
+ updated-= dup_key_found;
+ if (will_batch)
+ table->file->end_bulk_update();
}
- ANALYZE_STOP_TRACKING(&explain->command_tracker);
- table->auto_increment_field_not_null= FALSE;
- dup_key_found= 0;
- /*
- Caching the killed status to pass as the arg to query event constuctor;
- The cached value can not change whereas the killed status can
- (externally) since this point and change of the latter won't affect
- binlogging.
- It's assumed that if an error was set in combination with an effective
- killed status then the error is due to killing.
- */
- killed_status= thd->killed; // get the status of the volatile
- // simulated killing after the loop must be ineffective for binlogging
- DBUG_EXECUTE_IF("simulate_kill_bug27571",
- {
- thd->killed= KILL_QUERY;
- };);
- error= (killed_status == NOT_KILLED)? error : 1;
-
- if (error &&
- will_batch &&
- (loc_error= table->file->exec_bulk_update(&dup_key_found)))
- /*
- An error has occurred when a batched update was performed and returned
- an error indication. It cannot be an allowed duplicate key error since
- we require the batching handler to treat this as a normal behavior.
-
- Otherwise we simply remove the number of duplicate keys records found
- in the batched update.
- */
- {
- /* purecov: begin inspected */
- prepare_record_for_error_message(loc_error, table);
- table->file->print_error(loc_error,MYF(ME_FATALERROR));
- error= 1;
- /* purecov: end */
- }
- else
- updated-= dup_key_found;
- if (will_batch)
- table->file->end_bulk_update();
table->file->try_semi_consistent_read(0);
if (!transactional_table && updated > 0)
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index 988e082c476..cf180cafebc 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -19,6 +19,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/ha_spider.h b/storage/spider/ha_spider.h
index ba702f62a8c..9c3f600e1ae 100644
--- a/storage/spider/ha_spider.h
+++ b/storage/spider/ha_spider.h
@@ -17,20 +17,7 @@
#pragma interface
#endif
-#if (defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100000)
-#define SPIDER_HANDLER_START_BULK_INSERT_HAS_FLAGS
-#endif
-
-#if MYSQL_VERSION_ID >= 100203
-#define HANDLER_HAS_TOP_TABLE_FIELDS
-#define HANDLER_HAS_NEED_INFO_FOR_AUTO_INC
-#define HANDLER_HAS_CAN_USE_FOR_AUTO_INC_INIT
-#define PARTITION_HAS_EXTRA_ATTACH_CHILDREN
-#define PARTITION_HAS_GET_CHILD_HANDLERS
-#define PARTITION_HAS_EXTRA_ATTACH_CHILDREN
-#define PARTITION_HAS_GET_CHILD_HANDLERS
-#define HA_EXTRA_HAS_STARTING_ORDERED_INDEX_SCAN
-#endif
+#include "spd_environ.h"
#define SPIDER_CONNECT_INFO_MAX_LEN 64
#define SPIDER_CONNECT_INFO_PATH_MAX_LEN FN_REFLEN
@@ -588,6 +575,17 @@ public:
uchar *new_data
);
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS
+ inline int ha_direct_update_rows_init(uint mode,
+ KEY_MULTI_RANGE *ranges,
+ uint range_count, bool sorted,
+ uchar *new_data)
+ {
+ return handler::ha_direct_update_rows_init();
+ }
+ inline int direct_update_rows_init()
+ {
+ return direct_update_rows_init(2, NULL, 0, FALSE, NULL);
+ }
int direct_update_rows_init(
uint mode,
KEY_MULTI_RANGE *ranges,
@@ -596,6 +594,17 @@ public:
uchar *new_data
);
#ifdef HA_CAN_BULK_ACCESS
+ inline int ha_pre_direct_update_rows_init(uint mode,
+ KEY_MULTI_RANGE *ranges,
+ uint range_count, bool sorted,
+ uchar *new_data)
+ {
+ return handler::ha_pre_direct_update_rows_init();
+ }
+ inline int pre_direct_update_rows_init()
+ {
+ return pre_direct_update_rows_init(2, NULL, 0, FALSE, NULL);
+ }
int pre_direct_update_rows_init(
uint mode,
KEY_MULTI_RANGE *ranges,
@@ -604,6 +613,16 @@ public:
uchar *new_data
);
#endif
+ inline int ha_direct_update_rows(KEY_MULTI_RANGE *ranges,
+ uint range_count, bool sorted,
+ uchar *new_data, uint *update_rows)
+ {
+ return handler::ha_direct_update_rows(update_rows);
+ }
+ inline int direct_update_rows(uint *update_rows)
+ {
+ return direct_update_rows(NULL, 0, FALSE, NULL, update_rows);
+ }
int direct_update_rows(
KEY_MULTI_RANGE *ranges,
uint range_count,
@@ -612,6 +631,18 @@ public:
uint *update_rows
);
#ifdef HA_CAN_BULK_ACCESS
+ inline int ha_pre_direct_update_rows(KEY_MULTI_RANGE *ranges,
+ uint range_count, bool sorted,
+ uchar *new_data, uint *update_rows)
+ {
+ return handler::ha_pre_direct_update_rows();
+ }
+ inline int pre_direct_update_rows()
+ {
+    uint update_rows;
+
+    return pre_direct_update_rows(NULL, 0, FALSE, NULL, &update_rows);
+ }
int pre_direct_update_rows(
KEY_MULTI_RANGE *ranges,
uint range_count,
@@ -627,6 +658,16 @@ public:
const uchar *buf
);
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS
+  inline int ha_direct_delete_rows_init(uint mode,
+                                        KEY_MULTI_RANGE *ranges,
+                                        uint range_count, bool sorted)
+  {
+    return handler::ha_direct_delete_rows_init();  // range args are not forwarded; base init takes none
+  }
+  inline int direct_delete_rows_init()
+  {
+    return direct_delete_rows_init(2, NULL, 0, FALSE);  // NOTE(review): mode 2, no ranges — confirm mode semantics
+  }
int direct_delete_rows_init(
uint mode,
KEY_MULTI_RANGE *ranges,
@@ -634,6 +675,16 @@ public:
bool sorted
);
#ifdef HA_CAN_BULK_ACCESS
+  inline int ha_pre_direct_delete_rows_init(uint mode,
+                                            KEY_MULTI_RANGE *ranges,
+                                            uint range_count, bool sorted)
+  {
+    return handler::ha_pre_direct_delete_rows_init();  // range args are not forwarded; base init takes none
+  }
+  inline int pre_direct_delete_rows_init()
+  {
+    return pre_direct_delete_rows_init(2, NULL, 0, FALSE);  // NOTE(review): mode 2, no ranges — confirm mode semantics
+  }
int pre_direct_delete_rows_init(
uint mode,
KEY_MULTI_RANGE *ranges,
@@ -641,6 +692,16 @@ public:
bool sorted
);
#endif
+  inline int ha_direct_delete_rows(KEY_MULTI_RANGE *ranges,
+                                   uint range_count, bool sorted,
+                                   uint *delete_rows)
+  {
+    return handler::ha_direct_delete_rows(delete_rows);  // only the row-count out-param reaches the base entry
+  }
+  inline int direct_delete_rows(uint *delete_rows)
+  {
+    return direct_delete_rows(NULL, 0, FALSE, delete_rows);  // no-range form of the 4-arg overload
+  }
int direct_delete_rows(
KEY_MULTI_RANGE *ranges,
uint range_count,
@@ -648,6 +709,18 @@ public:
uint *delete_rows
);
#ifdef HA_CAN_BULK_ACCESS
+  inline int ha_pre_direct_delete_rows(KEY_MULTI_RANGE *ranges,
+                                       uint range_count, bool sorted,
+                                       uint *delete_rows)
+  {
+    return handler::ha_pre_direct_delete_rows();  // pre-phase: count out-param not consumed by base entry
+  }
+  inline int pre_direct_delete_rows()
+  {
+    uint delete_rows;  // scratch out-param; value is discarded by this convenience overload
+
+    return pre_direct_delete_rows(NULL, 0, FALSE, &delete_rows);
+  }
int pre_direct_delete_rows(
KEY_MULTI_RANGE *ranges,
uint range_count,
diff --git a/storage/spider/mysql-test/spider/bg/r/direct_update.result b/storage/spider/mysql-test/spider/bg/r/direct_update.result
index 74dae7aec2e..0e536d48617 100644
--- a/storage/spider/mysql-test/spider/bg/r/direct_update.result
+++ b/storage/spider/mysql-test/spider/bg/r/direct_update.result
@@ -48,6 +48,7 @@ direct_updating test
connection master_1;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 0
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-01 10:21:39
@@ -59,6 +60,7 @@ update all rows with function
UPDATE ta_l SET c = ADDDATE(c, 1);
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -70,6 +72,7 @@ update by primary key
UPDATE ta_l SET b = 'x' WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 2
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -81,6 +84,7 @@ update by a column without index
UPDATE ta_l SET c = '2011-10-17' WHERE b = 'x';
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -92,6 +96,7 @@ update by primary key with order and limit
UPDATE ta_l SET c = ADDDATE(c, 1) WHERE a < 4 ORDER BY b DESC LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 4
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -103,6 +108,7 @@ delete by primary key with order and limit
DELETE FROM ta_l WHERE a < 4 ORDER BY c LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -113,6 +119,7 @@ delete by a column without index
DELETE FROM ta_l WHERE b = 'c';
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 2
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -122,6 +129,7 @@ delete by primary key
DELETE FROM ta_l WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
diff --git a/storage/spider/mysql-test/spider/bg/r/direct_update_part.result b/storage/spider/mysql-test/spider/bg/r/direct_update_part.result
index 6db7c01f563..7069cd72fda 100644
--- a/storage/spider/mysql-test/spider/bg/r/direct_update_part.result
+++ b/storage/spider/mysql-test/spider/bg/r/direct_update_part.result
@@ -38,6 +38,7 @@ PRIMARY KEY(a)
) MASTER_1_ENGINE MASTER_1_COMMENT2_P_2_1
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 0
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-01 10:21:39
@@ -49,6 +50,7 @@ update all rows with function
UPDATE ta_l2 SET c = ADDDATE(c, 1);
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 2
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -60,6 +62,7 @@ update by primary key
UPDATE ta_l2 SET b = 'x' WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -71,6 +74,7 @@ update by a column without index
UPDATE ta_l2 SET c = '2011-10-17' WHERE b = 'x';
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 5
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -82,6 +86,7 @@ update by primary key with order and limit
UPDATE ta_l2 SET c = ADDDATE(c, 1) WHERE a < 4 ORDER BY b DESC LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 6
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -93,6 +98,7 @@ delete by primary key with order and limit
DELETE FROM ta_l2 WHERE a < 4 ORDER BY c LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -103,6 +109,7 @@ delete by a column without index
DELETE FROM ta_l2 WHERE b = 'c';
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -112,6 +119,7 @@ delete by primary key
DELETE FROM ta_l2 WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 4
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
diff --git a/storage/spider/mysql-test/spider/bg/r/spider_fixes.result b/storage/spider/mysql-test/spider/bg/r/spider_fixes.result
index f50c9822534..1db31ca9f95 100644
--- a/storage/spider/mysql-test/spider/bg/r/spider_fixes.result
+++ b/storage/spider/mysql-test/spider/bg/r/spider_fixes.result
@@ -461,6 +461,7 @@ Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
DELETE FROM t1;
Warnings:
Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
+Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
TRUNCATE t1;
Warnings:
diff --git a/storage/spider/mysql-test/spider/handler/r/direct_update.result b/storage/spider/mysql-test/spider/handler/r/direct_update.result
index 74dae7aec2e..0e536d48617 100644
--- a/storage/spider/mysql-test/spider/handler/r/direct_update.result
+++ b/storage/spider/mysql-test/spider/handler/r/direct_update.result
@@ -48,6 +48,7 @@ direct_updating test
connection master_1;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 0
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-01 10:21:39
@@ -59,6 +60,7 @@ update all rows with function
UPDATE ta_l SET c = ADDDATE(c, 1);
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -70,6 +72,7 @@ update by primary key
UPDATE ta_l SET b = 'x' WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 2
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -81,6 +84,7 @@ update by a column without index
UPDATE ta_l SET c = '2011-10-17' WHERE b = 'x';
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -92,6 +96,7 @@ update by primary key with order and limit
UPDATE ta_l SET c = ADDDATE(c, 1) WHERE a < 4 ORDER BY b DESC LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 4
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -103,6 +108,7 @@ delete by primary key with order and limit
DELETE FROM ta_l WHERE a < 4 ORDER BY c LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -113,6 +119,7 @@ delete by a column without index
DELETE FROM ta_l WHERE b = 'c';
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 2
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -122,6 +129,7 @@ delete by primary key
DELETE FROM ta_l WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
diff --git a/storage/spider/mysql-test/spider/handler/r/direct_update_part.result b/storage/spider/mysql-test/spider/handler/r/direct_update_part.result
index 6db7c01f563..7069cd72fda 100644
--- a/storage/spider/mysql-test/spider/handler/r/direct_update_part.result
+++ b/storage/spider/mysql-test/spider/handler/r/direct_update_part.result
@@ -38,6 +38,7 @@ PRIMARY KEY(a)
) MASTER_1_ENGINE MASTER_1_COMMENT2_P_2_1
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 0
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-01 10:21:39
@@ -49,6 +50,7 @@ update all rows with function
UPDATE ta_l2 SET c = ADDDATE(c, 1);
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 2
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -60,6 +62,7 @@ update by primary key
UPDATE ta_l2 SET b = 'x' WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -71,6 +74,7 @@ update by a column without index
UPDATE ta_l2 SET c = '2011-10-17' WHERE b = 'x';
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 5
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -82,6 +86,7 @@ update by primary key with order and limit
UPDATE ta_l2 SET c = ADDDATE(c, 1) WHERE a < 4 ORDER BY b DESC LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 6
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -93,6 +98,7 @@ delete by primary key with order and limit
DELETE FROM ta_l2 WHERE a < 4 ORDER BY c LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -103,6 +109,7 @@ delete by a column without index
DELETE FROM ta_l2 WHERE b = 'c';
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -112,6 +119,7 @@ delete by primary key
DELETE FROM ta_l2 WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 4
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
diff --git a/storage/spider/mysql-test/spider/handler/r/spider_fixes.result b/storage/spider/mysql-test/spider/handler/r/spider_fixes.result
index 9b14817eee4..c171167a1b7 100644
--- a/storage/spider/mysql-test/spider/handler/r/spider_fixes.result
+++ b/storage/spider/mysql-test/spider/handler/r/spider_fixes.result
@@ -465,6 +465,7 @@ Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
DELETE FROM t1;
Warnings:
Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
+Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
TRUNCATE t1;
Warnings:
diff --git a/storage/spider/mysql-test/spider/r/direct_update.result b/storage/spider/mysql-test/spider/r/direct_update.result
index 74dae7aec2e..0e536d48617 100644
--- a/storage/spider/mysql-test/spider/r/direct_update.result
+++ b/storage/spider/mysql-test/spider/r/direct_update.result
@@ -48,6 +48,7 @@ direct_updating test
connection master_1;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 0
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-01 10:21:39
@@ -59,6 +60,7 @@ update all rows with function
UPDATE ta_l SET c = ADDDATE(c, 1);
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -70,6 +72,7 @@ update by primary key
UPDATE ta_l SET b = 'x' WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 2
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -81,6 +84,7 @@ update by a column without index
UPDATE ta_l SET c = '2011-10-17' WHERE b = 'x';
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -92,6 +96,7 @@ update by primary key with order and limit
UPDATE ta_l SET c = ADDDATE(c, 1) WHERE a < 4 ORDER BY b DESC LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 4
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -103,6 +108,7 @@ delete by primary key with order and limit
DELETE FROM ta_l WHERE a < 4 ORDER BY c LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -113,6 +119,7 @@ delete by a column without index
DELETE FROM ta_l WHERE b = 'c';
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 2
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -122,6 +129,7 @@ delete by primary key
DELETE FROM ta_l WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
diff --git a/storage/spider/mysql-test/spider/r/direct_update_part.result b/storage/spider/mysql-test/spider/r/direct_update_part.result
index 6db7c01f563..7069cd72fda 100644
--- a/storage/spider/mysql-test/spider/r/direct_update_part.result
+++ b/storage/spider/mysql-test/spider/r/direct_update_part.result
@@ -38,6 +38,7 @@ PRIMARY KEY(a)
) MASTER_1_ENGINE MASTER_1_COMMENT2_P_2_1
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 0
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-01 10:21:39
@@ -49,6 +50,7 @@ update all rows with function
UPDATE ta_l2 SET c = ADDDATE(c, 1);
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 2
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -60,6 +62,7 @@ update by primary key
UPDATE ta_l2 SET b = 'x' WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -71,6 +74,7 @@ update by a column without index
UPDATE ta_l2 SET c = '2011-10-17' WHERE b = 'x';
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 5
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -82,6 +86,7 @@ update by primary key with order and limit
UPDATE ta_l2 SET c = ADDDATE(c, 1) WHERE a < 4 ORDER BY b DESC LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_update';
Variable_name Value
+Spider_direct_update 6
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -93,6 +98,7 @@ delete by primary key with order and limit
DELETE FROM ta_l2 WHERE a < 4 ORDER BY c LIMIT 1;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 1
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -103,6 +109,7 @@ delete by a column without index
DELETE FROM ta_l2 WHERE b = 'c';
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 3
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
@@ -112,6 +119,7 @@ delete by primary key
DELETE FROM ta_l2 WHERE a = 3;
SHOW STATUS LIKE 'Spider_direct_delete';
Variable_name Value
+Spider_direct_delete 4
SELECT a, b, date_format(c, '%Y-%m-%d %H:%i:%s') FROM ta_l2 ORDER BY a;
a b date_format(c, '%Y-%m-%d %H:%i:%s')
1 a 2008-08-02 10:21:39
diff --git a/storage/spider/mysql-test/spider/r/spider_fixes.result b/storage/spider/mysql-test/spider/r/spider_fixes.result
index f50c9822534..1db31ca9f95 100644
--- a/storage/spider/mysql-test/spider/r/spider_fixes.result
+++ b/storage/spider/mysql-test/spider/r/spider_fixes.result
@@ -461,6 +461,7 @@ Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
DELETE FROM t1;
Warnings:
Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
+Error 12702 Remote table 'auto_test_remote.ter1_1' is not found
Error 1146 Table 'auto_test_remote.ter1_1' doesn't exist
TRUNCATE t1;
Warnings:
diff --git a/storage/spider/spd_conn.cc b/storage/spider/spd_conn.cc
index 5e00ae19ae8..50604db2f5a 100644
--- a/storage/spider/spd_conn.cc
+++ b/storage/spider/spd_conn.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_copy_tables.cc b/storage/spider/spd_copy_tables.cc
index 7e7845635af..c8bbd4b31a0 100644
--- a/storage/spider/spd_copy_tables.cc
+++ b/storage/spider/spd_copy_tables.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc
index 1da25e17b50..e5f5d5f6c9c 100644
--- a/storage/spider/spd_db_conn.cc
+++ b/storage/spider/spd_db_conn.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_db_handlersocket.cc b/storage/spider/spd_db_handlersocket.cc
index 2ae84499aff..a28351891ac 100644
--- a/storage/spider/spd_db_handlersocket.cc
+++ b/storage/spider/spd_db_handlersocket.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index a41a943cd04..bd57f6712a9 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_db_oracle.cc b/storage/spider/spd_db_oracle.cc
index c8237f24f0f..0c9e84662bc 100644
--- a/storage/spider/spd_db_oracle.cc
+++ b/storage/spider/spd_db_oracle.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_direct_sql.cc b/storage/spider/spd_direct_sql.cc
index ad7bd54c69f..99594763956 100644
--- a/storage/spider/spd_direct_sql.cc
+++ b/storage/spider/spd_direct_sql.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_environ.h b/storage/spider/spd_environ.h
new file mode 100644
index 00000000000..ff7afb1fdaf
--- /dev/null
+++ b/storage/spider/spd_environ.h
@@ -0,0 +1,37 @@
+/* Copyright (C) 2008-2015 Kentoku Shiba & 2017 MariaDB corp
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ Define functionality offered by MySQL or MariaDB
+*/
+
+#ifndef SPD_ENVIRON_INCLUDED
+#define SPD_ENVIRON_INCLUDED
+
+#if (defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100000)
+#define SPIDER_HANDLER_START_BULK_INSERT_HAS_FLAGS
+#endif
+
+#if MYSQL_VERSION_ID >= 100204
+#define HANDLER_HAS_TOP_TABLE_FIELDS
+#define HANDLER_HAS_DIRECT_UPDATE_ROWS
+#define HANDLER_HAS_NEED_INFO_FOR_AUTO_INC
+#define HANDLER_HAS_CAN_USE_FOR_AUTO_INC_INIT
+#define PARTITION_HAS_EXTRA_ATTACH_CHILDREN
+#define PARTITION_HAS_GET_CHILD_HANDLERS
+#define HA_EXTRA_HAS_STARTING_ORDERED_INDEX_SCAN
+#endif
+
+#endif /* SPD_ENVIRON_INCLUDED */
diff --git a/storage/spider/spd_i_s.cc b/storage/spider/spd_i_s.cc
index 49824693984..207018c2ed7 100644
--- a/storage/spider/spd_i_s.cc
+++ b/storage/spider/spd_i_s.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_malloc.cc b/storage/spider/spd_malloc.cc
index ec71f9631bc..035fac41d43 100644
--- a/storage/spider/spd_malloc.cc
+++ b/storage/spider/spd_malloc.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_param.cc b/storage/spider/spd_param.cc
index 84d39b17897..d5f4d2813bf 100644
--- a/storage/spider/spd_param.cc
+++ b/storage/spider/spd_param.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_ping_table.cc b/storage/spider/spd_ping_table.cc
index f59f7760359..a0ceaf95f06 100644
--- a/storage/spider/spd_ping_table.cc
+++ b/storage/spider/spd_ping_table.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_sys_table.cc b/storage/spider/spd_sys_table.cc
index 86ab5fdccf4..afcd30412e2 100644
--- a/storage/spider/spd_sys_table.cc
+++ b/storage/spider/spd_sys_table.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc
index fe682ce0be4..d414e09e3b4 100644
--- a/storage/spider/spd_table.cc
+++ b/storage/spider/spd_table.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>
diff --git a/storage/spider/spd_trx.cc b/storage/spider/spd_trx.cc
index 007e1fcba5d..ddbedcdcbda 100644
--- a/storage/spider/spd_trx.cc
+++ b/storage/spider/spd_trx.cc
@@ -15,6 +15,7 @@
#define MYSQL_SERVER 1
#include "mysql_version.h"
+#include "spd_environ.h"
#if MYSQL_VERSION_ID < 50500
#include "mysql_priv.h"
#include <mysql/plugin.h>