Diffstat (limited to 'sql/ha_partition.cc')
-rw-r--r--  sql/ha_partition.cc  141
1 file changed, 139 insertions(+), 2 deletions(-)
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 0b46ca4123c..4bc2b2439b4 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2076,6 +2076,133 @@ partition_element *ha_partition::find_partition_element(uint part_id)
return NULL;
}
+uint ha_partition::count_query_cache_dependant_tables(uint8 *tables_type)
+{
+ DBUG_ENTER("ha_partition::count_query_cache_dependant_tables");
+ /* Here we rely on the fact that all tables are of the same type */
+ uint8 type= m_file[0]->table_cache_type();
+ (*tables_type)|= type;
+ DBUG_PRINT("info", ("cnt: %u", (uint)m_tot_parts));
+ /*
+ We need to save underlying tables only for HA_CACHE_TBL_ASKTRANSACT:
+ HA_CACHE_TBL_NONTRANSACT - because all changes go through the partition table
+ HA_CACHE_TBL_NOCACHE - because the table will not be cached
+ HA_CACHE_TBL_TRANSACT - the QC only needs to know that such a type is present
+ */
+ DBUG_RETURN(type == HA_CACHE_TBL_ASKTRANSACT ? m_tot_parts : 0);
+}
+
+my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
+ char *key, uint key_len,
+ uint8 type,
+ Query_cache *cache,
+ Query_cache_block_table **block_table,
+ handler *file,
+ uint *n)
+{
+ DBUG_ENTER("ha_partition::reg_query_cache_dependant_table");
+ qc_engine_callback engine_callback;
+ ulonglong engine_data;
+ /* ask underlying engine */
+ if (!file->register_query_cache_table(thd, key,
+ key_len,
+ &engine_callback,
+ &engine_data))
+ {
+ DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
+ key,
+ key + table_share->db.length + 1));
+ /*
+ As this can change from call to call, don't reset
+ thd->lex->safe_to_cache_query
+ */
+ thd->query_cache_is_applicable= 0; // Query can't be cached
+ DBUG_RETURN(TRUE);
+ }
+ (++(*block_table))->n= ++(*n);
+ if (!cache->insert_table(key_len,
+ key, (*block_table),
+ table_share->db.length,
+ type,
+ engine_callback, engine_data,
+ FALSE))
+ DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
+
+
+my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
+ Query_cache *cache,
+ Query_cache_block_table **block_table,
+ uint *n)
+{
+ char *name;
+ uint prefix_length= table_share->table_cache_key.length + 3;
+ uint num_parts= m_part_info->num_parts;
+ uint num_subparts= m_part_info->num_subparts;
+ uint i= 0;
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ char key[FN_REFLEN];
+
+ DBUG_ENTER("ha_partition::register_query_cache_dependant_tables");
+
+ /* see ha_partition::count_query_cache_dependant_tables */
+ if (m_file[0]->table_cache_type() != HA_CACHE_TBL_ASKTRANSACT)
+ DBUG_RETURN(FALSE); // nothing to register
+
+ /* prepare static part of the key */
+ memmove(key, table_share->table_cache_key.str,
+ table_share->table_cache_key.length);
+
+ name= key + table_share->table_cache_key.length - 1;
+ name[0]= name[2]= '#';
+ name[1]= 'P';
+ name+= 3;
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ uint part_len= strmov(name, part_elem->partition_name) - name;
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> subpart_it(part_elem->subpartitions);
+ partition_element *sub_elem;
+ char *sname= name + part_len;
+ uint j= 0, part;
+ sname[0]= sname[3]= '#';
+ sname[1]= 'S';
+ sname[2]= 'P';
+ sname += 4;
+ do
+ {
+ sub_elem= subpart_it++;
+ part= i * num_subparts + j;
+ uint spart_len= strmov(sname, sub_elem->partition_name) - name + 1;
+ if (reg_query_cache_dependant_table(thd, key,
+ prefix_length + part_len + 4 +
+ spart_len,
+ m_file[part]->table_cache_type(),
+ cache,
+ block_table, m_file[part],
+ n))
+ DBUG_RETURN(TRUE);
+ } while (++j < num_subparts);
+ }
+ else
+ {
+ if (reg_query_cache_dependant_table(thd, key,
+ prefix_length + part_len + 1,
+ m_file[i]->table_cache_type(),
+ cache,
+ block_table, m_file[i],
+ n))
+ DBUG_RETURN(TRUE);
+ }
+ } while (++i < num_parts);
+ DBUG_PRINT("info", ("cnt: %u", (uint)m_tot_parts));
+ DBUG_RETURN(FALSE);
+}
+
/*
Set up table share object before calling create on underlying handler
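For reference, the keys registered above mirror the #P#/#SP# markers used in partition file names: the table's cache key is extended with "#P#<partition>" and, for subpartitioned tables, with a further "#SP#<subpartition>", which is what the prefix_length/part_len/spart_len arithmetic accounts for. Below is a minimal, self-contained sketch of that key shape; the helper name and the use of std::string are illustrative only, and the byte layout is an approximation (the patch writes into a stack buffer of FN_REFLEN bytes, reuses the tail of table_cache_key in place, and tracks explicit lengths instead of returning a string).

#include <cstdio>
#include <string>

/*
  Illustrative sketch only: approximate the shape of the query cache key
  built by register_query_cache_dependant_tables():
    db '\0' table '\0' "#P#" partition [ "#SP#" subpartition ]
*/
static std::string make_partition_cache_key(const std::string &db,
                                            const std::string &table,
                                            const std::string &part,
                                            const std::string &subpart= "")
{
  std::string key= db;
  key.push_back('\0');                    /* embedded NUL, as in table_cache_key */
  key+= table;
  key.push_back('\0');
  key+= "#P#";                            /* partition marker (name[0..2]) */
  key+= part;
  if (!subpart.empty())
  {
    key+= "#SP#";                         /* subpartition marker (sname[0..3]) */
    key+= subpart;
  }
  return key;                             /* key.size() plays the role of key_len */
}

int main()
{
  /* hypothetical partition names, for illustration only */
  std::string k1= make_partition_cache_key("test", "t1", "p0");
  std::string k2= make_partition_cache_key("test", "t1", "p0", "sp1");
  /* the keys contain embedded NULs, so print lengths rather than the strings */
  printf("partition key length: %zu\n", k1.size());
  printf("subpartition key length: %zu\n", k2.size());
  return 0;
}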
@@ -3739,7 +3866,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
{
List_iterator<partition_element>
subpart_it(part_elem->subpartitions);
- partition_element __attribute__((unused)) *sub_elem;
+ partition_element *sub_elem __attribute__((unused));
uint j= 0, part;
do
{
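The hunk above only relocates the unused-variable attribute so that it follows the declarator, the placement GCC documents for applying an attribute to the declared variable rather than to its type. A minimal sketch of that placement, assuming a GCC-compatible compiler (the demo function name and forward declaration are ours; compile with -Wunused-variable to see that no warning is emitted):

/* Illustrative only: attribute placement matching the form adopted above */
struct partition_element;                  /* stand-in forward declaration */

static void attribute_placement_demo()
{
  partition_element *sub_elem __attribute__((unused));
  /* sub_elem is intentionally declared but never read here */
}

int main()
{
  attribute_placement_demo();
  return 0;
}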
@@ -6392,7 +6519,17 @@ ha_rows ha_partition::min_rows_for_estimate()
DBUG_ENTER("ha_partition::min_rows_for_estimate");
tot_used_partitions= bitmap_bits_set(&m_part_info->used_partitions);
- DBUG_ASSERT(tot_used_partitions);
+
+ /*
+ All partitions might have been left as unused during partition pruning
+ due to, for example, an impossible WHERE condition. Nonetheless, the
+ optimizer might still attempt to perform (e.g. range) analysis where an
+ estimate of the number of rows is calculated using records_in_range.
+ Hence, to handle this and other possible cases, use zero as the minimum
+ number of rows to base the estimate on if no partition is being used.
+ */
+ if (!tot_used_partitions)
+ DBUG_RETURN(0);
/*
Allow O(log2(tot_partitions)) increase in number of used partitions.