author     Nikita Malyavin <nikitamalyavin@gmail.com>    2018-06-21 16:46:11 +1000
committer  Sergei Golubchik <serg@mariadb.org>           2018-12-20 08:06:55 +0100
commit     6a73569f12fe4883937a669c262716c11d1f9238 (patch)
tree       5697b7e4521418a90c2dbb75949df63512682994 /sql
parent     7b2e2288e9f784ea12f85dc005503511fe99f796 (diff)
download   mariadb-git-6a73569f12fe4883937a669c262716c11d1f9238.tar.gz
MDEV-16429: Assertion `!table || (!table->read_set || bitmap_is_set(table->read_set, field_index))' fails upon attempt to update virtual column on partitioned versioned table
When using buffered sort in `UPDATE`, keyread is used. In this case, `TABLE::update_virtual_field` should be aborted, but it actually isn't, because it is called not with the top-level handler, but with the one that is actually going to access the disk. Here the problem surfaces with partitioning, so the solution is to recursively mark for keyread all the underlying partition handlers.

* ha_partition: update keyread state for child partitions

Closes #800
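Below is a minimal, standalone C++ sketch of the pattern this patch introduces: the per-partition extra() loop is generalised into loop_partitions(callback, param), and keyread start/stop is fanned out to every child handler so that each child's keyread state matches the parent's. The types here (partition_handler, the MAX_KEY stand-in, the simplified extra() dispatch) are illustrative only, not the real MariaDB handler API; in the actual change the key index is passed down through extra_opt().

    // Standalone sketch (toy types, not the real MariaDB handler API) of the
    // fan-out the patch introduces: the partition handler pushes keyread
    // start/stop down to every child handler through a generic callback loop.
    #include <cstdio>
    #include <vector>

    enum ha_extra_function { HA_EXTRA_KEYREAD, HA_EXTRA_NO_KEYREAD };

    struct handler
    {
      static const unsigned MAX_KEY= ~0u;      // stand-in for "keyread disabled"
      unsigned keyread= MAX_KEY;
      bool keyread_enabled() const { return keyread != MAX_KEY; }

      virtual int extra(ha_extra_function) { return 0; }
      virtual ~handler() {}

      int ha_start_keyread(unsigned idx)
      {
        int res= keyread_enabled() ? 0 : extra(HA_EXTRA_KEYREAD);
        keyread= idx;
        return res;
      }
      int ha_end_keyread()
      {
        if (!keyread_enabled())
          return 0;
        keyread= MAX_KEY;
        return extra(HA_EXTRA_NO_KEYREAD);
      }
    };

    typedef int handler_callback(handler*, void*);

    // Callbacks in the spirit of the ones added to ha_partition.cc.
    static int start_keyread_cb(handler *h, void *p)
    { return h->ha_start_keyread(*(unsigned*) p); }

    static int end_keyread_cb(handler *h, void*)
    { return h->ha_end_keyread(); }

    struct partition_handler : handler
    {
      std::vector<handler*> children;

      // Generalised per-partition loop, replacing an extra()-only loop_extra().
      int loop_partitions(handler_callback *cb, void *param)
      {
        int result= 0;
        for (handler *child : children)
          if (int tmp= cb(child, param))
            result= tmp;                       // keep last error, still visit all parts
        return result;
      }

      int extra(ha_extra_function op) override
      {
        if (op == HA_EXTRA_KEYREAD)
        {
          unsigned idx= 0;                     // the real code passes the index via extra_opt()
          return loop_partitions(start_keyread_cb, &idx);
        }
        if (op == HA_EXTRA_NO_KEYREAD)
          return loop_partitions(end_keyread_cb, NULL);
        return 0;
      }
    };

    int main()
    {
      handler p0, p1;
      partition_handler part;
      part.children.push_back(&p0);
      part.children.push_back(&p1);

      part.ha_start_keyread(0);                // parent enables keyread, children follow
      std::printf("p0=%d p1=%d\n", p0.keyread_enabled(), p1.keyread_enabled()); // 1 1
      part.ha_end_keyread();                   // parent disables keyread, children follow
      std::printf("p0=%d p1=%d\n", p0.keyread_enabled(), p1.keyread_enabled()); // 0 0
      return 0;
    }

With the keyread state kept in sync on the underlying handlers, the disk-level handler seen by `TABLE::update_virtual_field` reports keyread as enabled, so the update of the virtual column is skipped and the read_set assertion no longer fires.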
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_partition.cc  104
-rw-r--r--  sql/ha_partition.h     6
-rw-r--r--  sql/handler.h          4
3 files changed, 75 insertions, 39 deletions
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 090102266f0..3eabc698fed 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -401,7 +401,7 @@ const char *ha_partition::table_type() const
ha_partition::~ha_partition()
{
- DBUG_ENTER("ha_partition::~ha_partition()");
+ DBUG_ENTER("ha_partition::~ha_partition");
if (m_new_partitions_share_refs.elements)
m_new_partitions_share_refs.delete_elements();
if (m_file != NULL)
@@ -616,7 +616,7 @@ int ha_partition::create_partitioning_metadata(const char *path,
const char *old_path,
int action_flag)
{
- DBUG_ENTER("ha_partition::create_partitioning_metadata()");
+ DBUG_ENTER("ha_partition::create_partitioning_metadata");
/*
We need to update total number of parts since we might write the handler
@@ -6811,6 +6811,24 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info,
}
+static int extra_cb(handler *h, void *operation)
+{
+ return h->extra(*(enum ha_extra_function*)operation);
+}
+
+
+static int start_keyread_cb(handler* h, void *p)
+{
+ return h->ha_start_keyread(*(uint*)p);
+}
+
+
+static int end_keyread_cb(handler* h, void *unused)
+{
+ return h->ha_end_keyread();
+}
+
+
/**
General function to prepare handler for certain behavior.
@@ -7131,11 +7149,12 @@ int ha_partition::extra(enum ha_extra_function operation)
switch (operation) {
/* Category 1), used by most handlers */
- case HA_EXTRA_KEYREAD:
case HA_EXTRA_NO_KEYREAD:
+ DBUG_RETURN(loop_partitions(end_keyread_cb, NULL));
+ case HA_EXTRA_KEYREAD:
case HA_EXTRA_FLUSH:
case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE:
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
case HA_EXTRA_PREPARE_FOR_RENAME:
case HA_EXTRA_FORCE_REOPEN:
DBUG_RETURN(loop_extra_alter(operation));
@@ -7147,7 +7166,7 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
{
if (!m_myisam)
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
break;
}
@@ -7171,7 +7190,7 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_PREPARE_FOR_DROP:
case HA_EXTRA_FLUSH_CACHE:
{
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
}
case HA_EXTRA_NO_READCHECK:
{
@@ -7203,7 +7222,7 @@ int ha_partition::extra(enum ha_extra_function operation)
m_extra_cache_size= 0;
m_extra_prepare_for_update= FALSE;
m_extra_cache_part_id= NO_CURRENT_PART_ID;
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
}
case HA_EXTRA_IGNORE_NO_KEY:
case HA_EXTRA_NO_IGNORE_NO_KEY:
@@ -7237,7 +7256,7 @@ int ha_partition::extra(enum ha_extra_function operation)
}
/* Category 7), used by federated handlers */
case HA_EXTRA_INSERT_WITH_UPDATE:
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
/* Category 8) Operations only used by NDB */
case HA_EXTRA_DELETE_CANNOT_BATCH:
case HA_EXTRA_UPDATE_CANNOT_BATCH:
@@ -7264,7 +7283,7 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_BEGIN_ALTER_COPY:
case HA_EXTRA_END_ALTER_COPY:
case HA_EXTRA_FAKE_START_STMT:
- DBUG_RETURN(loop_extra(operation));
+ DBUG_RETURN(loop_partitions(extra_cb, &operation));
default:
{
/* Temporary crash to discover what is wrong */
@@ -7309,24 +7328,40 @@ int ha_partition::reset(void)
}
/*
- Special extra method for HA_EXTRA_CACHE with cachesize as extra parameter
+ Special extra method with additional parameter
+ See @ref ha_partition::extra
- SYNOPSIS
- extra_opt()
- operation Must be HA_EXTRA_CACHE
- cachesize Size of cache in full table scan
+ @param[in] operation operation to execute
+ @param[in] arg extra argument
- RETURN VALUE
- >0 Error code
- 0 Success
+ @return status
+ @retval 0 success
+ @retval >0 error code
+
+ @detail
+ Operations supported by extra_opt:
+ HA_EXTRA_KEYREAD:
+ arg is interpreted as key index
+ HA_EXTRA_CACHE:
+ arg is interpreted as size of cache in full table scan
+
+ For detailed description refer to @ref ha_partition::extra
*/
-int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
+int ha_partition::extra_opt(enum ha_extra_function operation, ulong arg)
{
- DBUG_ENTER("ha_partition::extra_opt()");
+ DBUG_ENTER("ha_partition::extra_opt");
- DBUG_ASSERT(HA_EXTRA_CACHE == operation);
- prepare_extra_cache(cachesize);
+ switch (operation)
+ {
+ case HA_EXTRA_KEYREAD:
+ DBUG_RETURN(loop_partitions(start_keyread_cb, &arg));
+ case HA_EXTRA_CACHE:
+ prepare_extra_cache(arg);
+ DBUG_RETURN(0);
+ default:
+ DBUG_ASSERT(0);
+ }
DBUG_RETURN(0);
}
@@ -7344,7 +7379,7 @@ int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize)
void ha_partition::prepare_extra_cache(uint cachesize)
{
- DBUG_ENTER("ha_partition::prepare_extra_cache()");
+ DBUG_ENTER("ha_partition::prepare_extra_cache");
DBUG_PRINT("info", ("cachesize %u", cachesize));
m_extra_cache= TRUE;
@@ -7374,7 +7409,7 @@ int ha_partition::loop_extra_alter(enum ha_extra_function operation)
{
int result= 0, tmp;
handler **file;
- DBUG_ENTER("ha_partition::loop_extra_alter()");
+ DBUG_ENTER("ha_partition::loop_extra_alter");
DBUG_ASSERT(operation == HA_EXTRA_PREPARE_FOR_RENAME ||
operation == HA_EXTRA_FORCE_REOPEN);
@@ -7390,34 +7425,33 @@ int ha_partition::loop_extra_alter(enum ha_extra_function operation)
if ((tmp= (*file)->extra(operation)))
result= tmp;
}
- if ((tmp= loop_extra(operation)))
+ if ((tmp= loop_partitions(extra_cb, &operation)))
result= tmp;
DBUG_RETURN(result);
}
-/*
- Call extra on all partitions
+/**
+ Call callback(part, param) on all partitions
- SYNOPSIS
- loop_extra()
- operation extra operation type
+ @param callback a callback to call for each partition
+ @param param a void*-parameter passed to callback
- RETURN VALUE
- >0 Error code
- 0 Success
+ @return Operation status
+ @retval >0 Error code
+ @retval 0 Success
*/
-int ha_partition::loop_extra(enum ha_extra_function operation)
+int ha_partition::loop_partitions(handler_callback callback, void *param)
{
int result= 0, tmp;
uint i;
- DBUG_ENTER("ha_partition::loop_extra()");
+ DBUG_ENTER("ha_partition::loop_partitions");
for (i= bitmap_get_first_set(&m_part_info->lock_partitions);
i < m_tot_parts;
i= bitmap_get_next_set(&m_part_info->lock_partitions, i))
{
- if ((tmp= m_file[i]->extra(operation)))
+ if ((tmp= callback(m_file[i], param)))
result= tmp;
}
/* Add all used partitions to be called in reset(). */
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index e348da08443..9a73eeff817 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -649,7 +649,7 @@ public:
void get_dynamic_partition_info(PARTITION_STATS *stat_info,
uint part_id);
virtual int extra(enum ha_extra_function operation);
- virtual int extra_opt(enum ha_extra_function operation, ulong cachesize);
+ virtual int extra_opt(enum ha_extra_function operation, ulong arg);
virtual int reset(void);
virtual uint count_query_cache_dependant_tables(uint8 *tables_type);
virtual my_bool
@@ -659,6 +659,8 @@ public:
uint *n);
private:
+ typedef int handler_callback(handler *, void *);
+
my_bool reg_query_cache_dependant_table(THD *thd,
char *engine_key,
uint engine_key_len,
@@ -669,7 +671,7 @@ private:
**block_table,
handler *file, uint *n);
static const uint NO_CURRENT_PART_ID;
- int loop_extra(enum ha_extra_function operation);
+ int loop_partitions(handler_callback callback, void *param);
int loop_extra_alter(enum ha_extra_function operations);
void late_extra_cache(uint partition_id);
void late_extra_no_cache(uint partition_id);
diff --git a/sql/handler.h b/sql/handler.h
index ed2ef822c88..d34064811f0 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -2874,7 +2874,7 @@ public:
bool keyread_enabled() { return keyread < MAX_KEY; }
int ha_start_keyread(uint idx)
{
- int res= keyread_enabled() ? 0 : extra(HA_EXTRA_KEYREAD);
+ int res= keyread_enabled() ? 0 : extra_opt(HA_EXTRA_KEYREAD, idx);
keyread= idx;
return res;
}
@@ -3278,7 +3278,7 @@ public:
uint part_id);
virtual int extra(enum ha_extra_function operation)
{ return 0; }
- virtual int extra_opt(enum ha_extra_function operation, ulong cache_size)
+ virtual int extra_opt(enum ha_extra_function operation, ulong arg)
{ return extra(operation); }
/**