author     Michael Widenius <monty@askmonty.org>    2013-07-03 22:50:34 +0300
committer  Michael Widenius <monty@askmonty.org>    2013-07-03 22:50:34 +0300
commit     a9d1d76e5eb33d7d7762140625d09bbfb9ae91bb (patch)
tree       8bc0efc14fbc8ff396dec2a802545bfc8ec7c6ca /sql
parent     68262ba648886e2d5f35fc85e3cc45df7ffd9ae5 (diff)
download   mariadb-git-a9d1d76e5eb33d7d7762140625d09bbfb9ae91bb.tar.gz
Fixed issues with partitions and create temporary table SELECT ...
Merged all ddl_logging code. Merged sql_partition.cc.
innodb_mysql_lock2.test and partition_cache.test now work.
Changed the interface of strconvert() to make it easier to use with strings
that are not \0-terminated.

sql/sql_partition.cc:
  Full merge with 5.6
sql/sql_table.cc:
  Merged all ddl_logging code
sql/strfunc.cc:
  Added from_length argument to strconvert() to make it possible to use it
  with strings that are not \0-terminated.
sql/strfunc.h:
  Added from_length argument to strconvert() to make it possible to use it
  with strings that are not \0-terminated.
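As a rough illustration of the strconvert() interface change described above
(this sketch is not part of the commit; the variable names are hypothetical,
only the added from_length argument comes from the sql/strfunc.h hunk below):

  /* Convert a length-delimited, not \0-terminated name to the filename
     character set without first copying it into a terminated buffer. */
  char conv[FN_REFLEN];
  uint errors;
  uint conv_length= strconvert(system_charset_info,
                               name, name_length,      /* from, from_length */
                               &my_charset_filename,
                               conv, sizeof(conv), &errors);
  if (errors)
    conv_length= 0;            /* illustrative fallback; real callers differ */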
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_ndbcluster.cc    62
-rw-r--r--  sql/ha_partition.cc    105
-rw-r--r--  sql/ha_partition.h       5
-rw-r--r--  sql/innodb_priv.h        2
-rw-r--r--  sql/item_timefunc.cc     4
-rw-r--r--  sql/rpl_mi.cc            6
-rw-r--r--  sql/sql_base.cc         19
-rw-r--r--  sql/sql_cache.cc       120
-rw-r--r--  sql/sql_cache.h          6
-rw-r--r--  sql/sql_partition.cc   279
-rw-r--r--  sql/sql_table.cc       577
-rw-r--r--  sql/strfunc.cc          11
-rw-r--r--  sql/strfunc.h            2
13 files changed, 662 insertions, 536 deletions
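The ha_partition.cc and sql_cache.cc hunks below register each partition with
the query cache under an engine key derived from the table's normalized path
plus a partition suffix. A sketch of the key layout (illustrative only, not
the committed code; the variable names here are hypothetical):

  /*
    Engine key layout built by the hunks below:
      <normalized_path>#P#<partition>                    for a partition
      <normalized_path>#P#<partition>#SP#<subpartition>  for a subpartition
    The terminating \0 is stored as part of the key.
  */
  char engine_key[FN_REFLEN];
  char *pos= strmov(engine_key, share->normalized_path.str);
  pos= strmov(pos, "#P#");
  pos= strmov(pos, part_elem->partition_name);
  if (sub_elem)                                  /* subpartitioned table */
  {
    pos= strmov(pos, "#SP#");
    pos= strmov(pos, sub_elem->partition_name);
  }
  uint engine_key_len= (uint) (pos - engine_key) + 1;   /* include the \0 */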
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 287bcb0529c..9524a0366d3 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -8114,23 +8114,33 @@ uint8 ha_ndbcluster::table_cache_type()
}
-uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
+/**
+ Retrieve the commit count for the table object.
+
+ @param thd Thread context.
+ @param norm_name Normalized path to the table.
+ @param[out] commit_count Commit count for the table.
+
+ @return 0 on success.
+ @return 1 if an error occurred.
+*/
+
+uint ndb_get_commitcount(THD *thd, char *norm_name,
Uint64 *commit_count)
{
- char name[FN_REFLEN + 1];
+ char dbname[NAME_LEN + 1];
NDB_SHARE *share;
DBUG_ENTER("ndb_get_commitcount");
- build_table_filename(name, sizeof(name) - 1,
- dbname, tabname, "", 0);
- DBUG_PRINT("enter", ("name: %s", name));
- mysql_mutex_lock(&ndbcluster_mutex);
+ DBUG_PRINT("enter", ("name: %s", norm_name));
+ pthread_mutex_lock(&ndbcluster_mutex);
if (!(share=(NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
- (uchar*) name,
- strlen(name))))
+ (const uchar*) norm_name,
+ strlen(norm_name))))
{
- mysql_mutex_unlock(&ndbcluster_mutex);
- DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", name));
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables",
+ norm_name));
DBUG_RETURN(1);
}
/* ndb_share reference temporary, free below */
@@ -8162,6 +8172,8 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
Ndb *ndb;
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(1);
+
+ ha_ndbcluster::set_dbname(norm_name, dbname);
if (ndb->setDatabaseName(dbname))
{
ERR_RETURN(ndb->getNdbError());
@@ -8171,7 +8183,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
struct Ndb_statistics stat;
{
- Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname);
+ char tblname[NAME_LEN + 1];
+ ha_ndbcluster::set_tabname(norm_name, tblname);
+ Ndb_table_guard ndbtab_g(ndb->getDictionary(), tblname);
if (ndbtab_g.get_table() == 0
|| ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat))
{
@@ -8221,10 +8235,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
@param thd thread handle
- @param full_name concatenation of database name,
- the null character '\\0', and the table name
- @param full_name_len length of the full name,
- i.e. len(dbname) + len(tablename) + 1
+ @param full_name normalized path to the table in the canonical
+ format.
+ @param full_name_len length of the normalized path to the table.
@param engine_data parameter retrieved when query was first inserted into
the cache. If the value of engine_data is changed,
all queries for this table should be invalidated.
@@ -8243,11 +8256,15 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
ulonglong *engine_data)
{
Uint64 commit_count;
- char *dbname= full_name;
- char *tabname= dbname+strlen(dbname)+1;
+ char dbname[NAME_LEN + 1];
+ char tabname[NAME_LEN + 1];
#ifndef DBUG_OFF
char buff[22], buff2[22];
#endif
+
+ ha_ndbcluster::set_dbname(full_name, dbname);
+ ha_ndbcluster::set_tabname(full_name, tabname);
+
DBUG_ENTER("ndbcluster_cache_retrieval_allowed");
DBUG_PRINT("enter", ("dbname: %s, tabname: %s", dbname, tabname));
@@ -8257,7 +8274,7 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
DBUG_RETURN(FALSE);
}
- if (ndb_get_commitcount(thd, dbname, tabname, &commit_count))
+ if (ndb_get_commitcount(thd, full_name, &commit_count))
{
*engine_data= 0; /* invalidate */
DBUG_PRINT("exit", ("No, could not retrieve commit_count"));
@@ -8292,10 +8309,9 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
the cached query is reused.
@param thd thread handle
- @param full_name concatenation of database name,
- the null character '\\0', and the table name
- @param full_name_len length of the full name,
- i.e. len(dbname) + len(tablename) + 1
+ @param full_name normalized path to the table in the
+ canonical format.
+ @param full_name_len length of the normalized path to the table.
@param engine_callback function to be called before using cache on
this table
@param[out] engine_data commit_count for this table
@@ -8325,7 +8341,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
DBUG_RETURN(FALSE);
}
- if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count))
+ if (ndb_get_commitcount(thd, full_name, &commit_count))
{
*engine_data= 0;
DBUG_PRINT("exit", ("Error, could not get commitcount"));
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 283f4b77652..ed7d505f1a7 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2304,26 +2304,27 @@ uint ha_partition::count_query_cache_dependant_tables(uint8 *tables_type)
DBUG_RETURN(type == HA_CACHE_TBL_ASKTRANSACT ? m_tot_parts : 0);
}
-my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
- char *key, uint key_len,
- uint8 type,
- Query_cache *cache,
- Query_cache_block_table **block_table,
- handler *file,
- uint *n)
+my_bool ha_partition::
+reg_query_cache_dependant_table(THD *thd,
+ char *engine_key, uint engine_key_len,
+ char *cache_key, uint cache_key_len,
+ uint8 type,
+ Query_cache *cache,
+ Query_cache_block_table **block_table,
+ handler *file,
+ uint *n)
{
DBUG_ENTER("ha_partition::reg_query_cache_dependant_table");
qc_engine_callback engine_callback;
ulonglong engine_data;
/* ask undelying engine */
- if (!file->register_query_cache_table(thd, key,
- key_len,
+ if (!file->register_query_cache_table(thd, engine_key,
+ engine_key_len,
&engine_callback,
&engine_data))
{
- DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
- key,
- key + table_share->db.length + 1));
+ DBUG_PRINT("qcache", ("Handler does not allow caching for %.*s",
+ engine_key_len, engine_key));
/*
As this can change from call to call, don't reset set
thd->lex->safe_to_cache_query
@@ -2332,9 +2333,11 @@ my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
DBUG_RETURN(TRUE);
}
(++(*block_table))->n= ++(*n);
- if (!cache->insert_table(key_len,
- key, (*block_table),
+ if (!cache->insert_table(cache_key_len,
+ cache_key, (*block_table),
table_share->db.length,
+ (uint8) (cache_key_len -
+ table_share->table_cache_key.length),
type,
engine_callback, engine_data,
FALSE))
@@ -2343,19 +2346,19 @@ my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
}
-my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
- Query_cache *cache,
- Query_cache_block_table **block_table,
- uint *n)
+my_bool ha_partition::
+register_query_cache_dependant_tables(THD *thd,
+ Query_cache *cache,
+ Query_cache_block_table **block_table,
+ uint *n)
{
- char *name;
- uint prefix_length= table_share->table_cache_key.length + 3;
+ char *engine_key_end, *query_cache_key_end;
+ uint i;
uint num_parts= m_part_info->num_parts;
uint num_subparts= m_part_info->num_subparts;
- uint i= 0;
+ int diff_length;
List_iterator<partition_element> part_it(m_part_info->partitions);
- char key[FN_REFLEN];
-
+ char engine_key[FN_REFLEN], query_cache_key[FN_REFLEN];
DBUG_ENTER("ha_partition::register_query_cache_dependant_tables");
/* see ha_partition::count_query_cache_dependant_tables */
@@ -2363,36 +2366,51 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
DBUG_RETURN(FALSE); // nothing to register
/* prepare static part of the key */
- memmove(key, table_share->table_cache_key.str,
- table_share->table_cache_key.length);
+ memcpy(engine_key, table_share->normalized_path.str,
+ table_share->normalized_path.length);
+ memcpy(query_cache_key, table_share->table_cache_key.str,
+ table_share->table_cache_key.length);
- name= key + table_share->table_cache_key.length - 1;
- name[0]= name[2]= '#';
- name[1]= 'P';
- name+= 3;
+ diff_length= ((int) table_share->table_cache_key.length -
+ (int) table_share->normalized_path.length -1);
+ engine_key_end= engine_key + table_share->normalized_path.length;
+ query_cache_key_end= query_cache_key + table_share->table_cache_key.length -1;
+
+ engine_key_end[0]= engine_key_end[2]= query_cache_key_end[0]=
+ query_cache_key_end[2]= '#';
+ query_cache_key_end[1]= engine_key_end[1]= 'P';
+ engine_key_end+= 3;
+ query_cache_key_end+= 3;
+
+ i= 0;
do
{
partition_element *part_elem= part_it++;
- uint part_len= strmov(name, part_elem->partition_name) - name;
+ char *engine_pos= strmov(engine_key_end, part_elem->partition_name);
if (m_is_sub_partitioned)
{
List_iterator<partition_element> subpart_it(part_elem->subpartitions);
partition_element *sub_elem;
- char *sname= name + part_len;
uint j= 0, part;
- sname[0]= sname[3]= '#';
- sname[1]= 'S';
- sname[2]= 'P';
- sname += 4;
+ engine_pos[0]= engine_pos[3]= '#';
+ engine_pos[1]= 'S';
+ engine_pos[2]= 'P';
+ engine_pos += 4;
do
{
+ char *end;
+ uint length;
sub_elem= subpart_it++;
part= i * num_subparts + j;
- uint spart_len= strmov(sname, sub_elem->partition_name) - name + 1;
- if (reg_query_cache_dependant_table(thd, key,
- prefix_length + part_len + 4 +
- spart_len,
+ /* we store the end \0 as part of the key */
+ end= strmov(engine_pos, sub_elem->partition_name);
+ length= end - engine_key;
+ /* Copy the suffix also to query cache key */
+ memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end));
+ if (reg_query_cache_dependant_table(thd, engine_key, length,
+ query_cache_key,
+ length + diff_length,
m_file[part]->table_cache_type(),
cache,
block_table, m_file[part],
@@ -2402,8 +2420,13 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
}
else
{
- if (reg_query_cache_dependant_table(thd, key,
- prefix_length + part_len + 1,
+ char *end= engine_pos+1; // copy end \0
+ uint length= end - engine_key;
+ /* Copy the suffix also to query cache key */
+ memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end));
+ if (reg_query_cache_dependant_table(thd, engine_key, length,
+ query_cache_key,
+ length + diff_length,
m_file[i]->table_cache_type(),
cache,
block_table, m_file[i],
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 05b285c0eb9..eeb2ee5dec5 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -643,7 +643,10 @@ public:
private:
my_bool reg_query_cache_dependant_table(THD *thd,
- char *key, uint key_len, uint8 type,
+ char *engine_key,
+ uint engine_key_len,
+ char *query_key, uint query_key_len,
+ uint8 type,
Query_cache *cache,
Query_cache_block_table
**block_table,
diff --git a/sql/innodb_priv.h b/sql/innodb_priv.h
index 5406c292b18..d6f7c90e93e 100644
--- a/sql/innodb_priv.h
+++ b/sql/innodb_priv.h
@@ -26,7 +26,7 @@ int get_quote_char_for_identifier(THD *thd, const char *name, uint length);
bool schema_table_store_record(THD *thd, TABLE *table);
void localtime_to_TIME(MYSQL_TIME *to, struct tm *from);
bool check_global_access(THD *thd, ulong want_access);
-uint strconvert(CHARSET_INFO *from_cs, const char *from,
+uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length,
uint *errors);
void sql_print_error(const char *format, ...);
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 8177a019386..e599292f67e 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -417,8 +417,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
if (((fuzzy_date & TIME_NO_ZERO_IN_DATE) &&
(l_time->year == 0 || l_time->month == 0 || l_time->day == 0)) ||
- (fuzzy_date & TIME_NO_ZERO_DATE) &&
- (l_time->year == 0 && l_time->month == 0 && l_time->day == 0))
+ ((fuzzy_date & TIME_NO_ZERO_DATE) &&
+ (l_time->year == 0 && l_time->month == 0 && l_time->day == 0)))
goto err;
if (val != val_end)
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index ad058fa4933..7b363fbaf7d 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -687,15 +687,15 @@ void create_logfile_name_with_suffix(char *res_file_name, uint length,
{
const char *info_file_end= info_file + (p - res_file_name);
const char *ext= append ? info_file_end : fn_ext2(info_file);
- size_t res_length, ext_pos;
+ size_t res_length, ext_pos, from_length;
uint errors;
/* Create null terminated string */
- strmake(buff, suffix->str, suffix->length);
+ from_length= strmake(buff, suffix->str, suffix->length) - buff;
/* Convert to lower case */
my_casedn_str(system_charset_info, buff);
/* Convert to characters usable in a file name */
- res_length= strconvert(system_charset_info, buff,
+ res_length= strconvert(system_charset_info, buff, from_length,
&my_charset_filename, res, sizeof(res), &errors);
ext_pos= (size_t) (ext - info_file);
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index acc4f09cec4..5cab0c251fe 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -4575,9 +4575,24 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
tables->db, tables->table_name, tables)); //psergey: invalid read of size 1 here
(*counter)++;
- /* Not a placeholder: must be a base table or a view. Let us open it. */
- DBUG_ASSERT(!tables->table);
+ /* Check if we are trying to create a temporary table */
+ if (tables->open_type == OT_TEMPORARY_ONLY)
+ {
+ /*
+ OT_TEMPORARY_ONLY means that we are in CREATE TEMPORARY TABLE statement.
+ Also such table list element can't correspond to prelocking placeholder
+ or to underlying table of merge table.
+ So existing temporary table should have been preopened by this moment
+ and we can simply continue without trying to open temporary or base
+ table.
+ */
+ DBUG_ASSERT(tables->open_strategy);
+ DBUG_ASSERT(!tables->prelocking_placeholder);
+ DBUG_ASSERT(!tables->parent_l);
+ DBUG_RETURN(0);
+ }
+ /* Not a placeholder: must be a base table or a view. Let us open it. */
if (tables->prelocking_placeholder)
{
/*
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 3dd57562e08..73bdf9fa984 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -336,6 +336,7 @@ TODO list:
#include "sql_acl.h" // SELECT_ACL
#include "sql_base.h" // TMP_TABLE_KEY_EXTRA
#include "debug_sync.h" // DEBUG_SYNC
+#include "sql_table.h"
#ifdef HAVE_QUERY_CACHE
#include <m_ctype.h>
#include <my_dir.h>
@@ -345,6 +346,7 @@ TODO list:
#include "probes_mysql.h"
#include "log_slow.h"
#include "transaction.h"
+#include "strfunc.h"
const uchar *query_state_map;
@@ -1636,6 +1638,41 @@ send_data_in_chunks(NET *net, const uchar *packet, ulong len)
#endif
+/**
+ Build a normalized table name suitable for query cache engine callback
+
+ This consists of normalized directory '/' normalized_file_name
+ followed by suffix.
+ Suffix is needed for partitioned tables.
+*/
+
+size_t build_normalized_name(char *buff, size_t bufflen,
+ const char *db, size_t db_len,
+ const char *table_name, size_t table_len,
+ size_t suffix_len)
+{
+ uint errors;
+ size_t length;
+ char *pos= buff, *end= buff+bufflen;
+ DBUG_ENTER("build_normalized_name");
+
+ (*pos++)= FN_LIBCHAR;
+ length= strconvert(system_charset_info, db, db_len,
+ &my_charset_filename, pos, bufflen - 3,
+ &errors);
+ pos+= length;
+ (*pos++)= FN_LIBCHAR;
+ length= strconvert(system_charset_info, table_name, table_len,
+ &my_charset_filename, pos, (uint) (end - pos),
+ &errors);
+ pos+= length;
+ if (pos + suffix_len < end)
+ pos= strmake(pos, table_name + table_len, suffix_len);
+
+ DBUG_RETURN((size_t) (pos - buff));
+}
+
+
/*
Check if the query is in the cache. If it was cached, send it
to the user.
@@ -2011,35 +2048,50 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
}
#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/
engine_data= table->engine_data();
- if (table->callback() &&
- !(*table->callback())(thd, table->db(),
- table->key_length(),
- &engine_data))
+ if (table->callback())
{
- DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
- table_list.db, table_list.alias));
- BLOCK_UNLOCK_RD(query_block);
- if (engine_data != table->engine_data())
+ char qcache_se_key_name[FN_REFLEN + 10];
+ uint qcache_se_key_len, db_length= strlen(table->db());
+ engine_data= table->engine_data();
+
+ qcache_se_key_len= build_normalized_name(qcache_se_key_name,
+ sizeof(qcache_se_key_name),
+ table->db(),
+ db_length,
+ table->table(),
+ table->key_length() -
+ db_length - 2 -
+ table->suffix_length(),
+ table->suffix_length());
+
+ if (!(*table->callback())(thd, qcache_se_key_name,
+ qcache_se_key_len, &engine_data))
{
- DBUG_PRINT("qcache",
- ("Handler require invalidation queries of %s.%s %lu-%lu",
- table_list.db, table_list.alias,
- (ulong) engine_data, (ulong) table->engine_data()));
- invalidate_table_internal(thd,
- (uchar *) table->db(),
- table->key_length());
- }
- else
- {
- /*
- As this can change from call to call, don't reset set
- thd->lex->safe_to_cache_query
- */
- thd->query_cache_is_applicable= 0; // Query can't be cached
+ DBUG_PRINT("qcache", ("Handler does not allow caching for %.*s",
+ qcache_se_key_len, qcache_se_key_name));
+ BLOCK_UNLOCK_RD(query_block);
+ if (engine_data != table->engine_data())
+ {
+ DBUG_PRINT("qcache",
+ ("Handler require invalidation queries of %.*s %lu-%lu",
+ qcache_se_key_len, qcache_se_key_name,
+ (ulong) engine_data, (ulong) table->engine_data()));
+ invalidate_table_internal(thd,
+ (uchar *) table->db(),
+ table->key_length());
+ }
+ else
+ {
+ /*
+ As this can change from call to call, don't reset set
+ thd->lex->safe_to_cache_query
+ */
+ thd->query_cache_is_applicable= 0; // Query can't be cached
+ }
+ /* End the statement transaction potentially started by engine. */
+ trans_rollback_stmt(thd);
+ goto err_unlock; // Parse query
}
- /* End the statement transaction potentially started by engine. */
- trans_rollback_stmt(thd);
- goto err_unlock; // Parse query
}
else
DBUG_PRINT("qcache", ("handler allow caching %s,%s",
@@ -3257,7 +3309,7 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used,
There are not callback function for for VIEWs
*/
if (!insert_table(key_length, key, (*block_table),
- tables_used->view_db.length + 1,
+ tables_used->view_db.length + 1, 0,
HA_CACHE_TBL_NONTRANSACT, 0, 0, TRUE))
DBUG_RETURN(0);
/*
@@ -3278,7 +3330,7 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used,
if (!insert_table(tables_used->table->s->table_cache_key.length,
tables_used->table->s->table_cache_key.str,
(*block_table),
- tables_used->db_length,
+ tables_used->db_length, 0,
tables_used->table->file->table_cache_type(),
tables_used->callback_func,
tables_used->engine_data,
@@ -3343,7 +3395,8 @@ my_bool Query_cache::register_all_tables(THD *thd,
my_bool
Query_cache::insert_table(uint key_len, char *key,
Query_cache_block_table *node,
- uint32 db_length, uint8 cache_type,
+ uint32 db_length, uint8 suffix_length_arg,
+ uint8 cache_type,
qc_engine_callback callback,
ulonglong engine_data,
my_bool hash)
@@ -3418,6 +3471,7 @@ Query_cache::insert_table(uint key_len, char *key,
char *db= header->db();
header->table(db + db_length + 1);
header->key_length(key_len);
+ header->suffix_length(suffix_length_arg);
header->type(cache_type);
header->callback(callback);
header->engine_data(engine_data);
@@ -4041,13 +4095,13 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
continue;
handler= table->file;
if (!handler->register_query_cache_table(thd,
- table->s->table_cache_key.str,
- table->s->table_cache_key.length,
+ table->s->normalized_path.str,
+ table->s->normalized_path.length,
&tables_used->callback_func,
&tables_used->engine_data))
{
- DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
- tables_used->db, tables_used->alias));
+ DBUG_PRINT("qcache", ("Handler does not allow caching for %s",
+ table->s->normalized_path.str));
/*
As this can change from call to call, don't reset set
thd->lex->safe_to_cache_query
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index 7444d444cf9..2d6392911f1 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -190,6 +190,7 @@ struct Query_cache_table
Query_cache_table() {} /* Remove gcc warning */
char *tbl;
uint32 key_len;
+ uint8 suffix_len; /* For partitioned tables */
uint8 table_type;
/* unique for every engine reference */
qc_engine_callback callback_func;
@@ -210,6 +211,8 @@ struct Query_cache_table
inline void table(char *table_arg) { tbl= table_arg; }
inline uint32 key_length() { return key_len; }
inline void key_length(uint32 len) { key_len= len; }
+ inline uint8 suffix_length() { return suffix_len; }
+ inline void suffix_length(uint8 len) { suffix_len= len; }
inline uint8 type() { return table_type; }
inline void type(uint8 t) { table_type= t; }
inline qc_engine_callback callback() { return callback_func; }
@@ -490,7 +493,8 @@ protected:
unsigned pkt_nr);
my_bool insert_table(uint key_len, char *key,
Query_cache_block_table *node,
- uint32 db_length, uint8 cache_type,
+ uint32 db_length, uint8 suffix_length_arg,
+ uint8 cache_type,
qc_engine_callback callback,
ulonglong engine_data,
my_bool hash);
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 05ce5bdb4ce..aff92331bd9 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -1,5 +1,5 @@
-/* Copyright (c) 2005, 2011, Oracle and/or its affiliates.
- Copyright (c) 2009-2011, Monty Program Ab
+/* Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2009-2013, Monty Program Ab & SkySQL Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -12,7 +12,7 @@
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/*
This file is a container for general functionality related
@@ -70,6 +70,7 @@
#include "sql_analyse.h" // append_escaped
#include "sql_alter.h" // Alter_table_ctx
+#include <algorithm>
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
@@ -190,7 +191,7 @@ static int cmp_rec_and_tuple_prune(part_column_list_val *val,
item New converted item
*/
-Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs)
+Item* convert_charset_partition_constant(Item *item, const CHARSET_INFO *cs)
{
THD *thd= current_thd;
Name_resolution_context *context= &thd->lex->current_select->context;
@@ -208,21 +209,18 @@ Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs)
}
-/*
- A support function to check if a name is in a list of strings
+/**
+ A support function to check if a name is in a list of strings.
- SYNOPSIS
- is_name_in_list()
- name String searched for
- list_names A list of names searched in
+ @param name String searched for
+ @param list_names A list of names searched in
- RETURN VALUES
- TRUE String found
- FALSE String not found
+ @return True if the name is in the list.
+ @retval true String found
+ @retval false String not found
*/
-bool is_name_in_list(char *name,
- List<char> list_names)
+static bool is_name_in_list(char *name, List<char> list_names)
{
List_iterator<char> names_it(list_names);
uint num_names= list_names.elements;
@@ -289,61 +287,6 @@ bool partition_default_handling(TABLE *table, partition_info *part_info,
/*
- Check that the reorganized table will not have duplicate partitions.
-
- SYNOPSIS
- check_reorganise_list()
- new_part_info New partition info
- old_part_info Old partition info
- list_part_names The list of partition names that will go away and
- can be reused in the new table.
-
- RETURN VALUES
- TRUE Inacceptable name conflict detected.
- FALSE New names are OK.
-
- DESCRIPTION
- Can handle that the 'new_part_info' and 'old_part_info' the same
- in which case it checks that the list of names in the partitions
- doesn't contain any duplicated names.
-*/
-
-bool check_reorganise_list(partition_info *new_part_info,
- partition_info *old_part_info,
- List<char> list_part_names)
-{
- uint new_count, old_count;
- uint num_new_parts= new_part_info->partitions.elements;
- uint num_old_parts= old_part_info->partitions.elements;
- List_iterator<partition_element> new_parts_it(new_part_info->partitions);
- bool same_part_info= (new_part_info == old_part_info);
- DBUG_ENTER("check_reorganise_list");
-
- new_count= 0;
- do
- {
- List_iterator<partition_element> old_parts_it(old_part_info->partitions);
- char *new_name= (new_parts_it++)->partition_name;
- new_count++;
- old_count= 0;
- do
- {
- char *old_name= (old_parts_it++)->partition_name;
- old_count++;
- if (same_part_info && old_count == new_count)
- break;
- if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
- {
- if (!is_name_in_list(old_name, list_part_names))
- DBUG_RETURN(TRUE);
- }
- } while (old_count < num_old_parts);
- } while (new_count < num_new_parts);
- DBUG_RETURN(FALSE);
-}
-
-
-/*
A useful routine used by update_row for partition handlers to calculate
the partition ids of the old and the new record.
@@ -888,8 +831,7 @@ static bool handle_list_of_fields(List_iterator<char> it,
uint primary_key= table->s->primary_key;
if (primary_key != MAX_KEY)
{
- uint num_key_parts= table->key_info[primary_key].user_defined_key_parts;
- uint i;
+ uint num_key_parts= table->key_info[primary_key].user_defined_key_parts, i;
/*
In the case of an empty list we use primary key as partition key.
*/
@@ -1265,7 +1207,7 @@ void check_range_capable_PF(TABLE *table)
and initialise it.
*/
-static bool set_up_partition_bitmap(THD *thd, partition_info *part_info)
+static bool set_up_partition_bitmaps(THD *thd, partition_info *part_info)
{
uint32 *bitmap_buf;
uint bitmap_bits= part_info->num_subparts?
@@ -1576,7 +1518,7 @@ bool field_is_partition_charset(Field *field)
!(field->type() == MYSQL_TYPE_VARCHAR))
return FALSE;
{
- CHARSET_INFO *cs= ((Field_str*)field)->charset();
+ const CHARSET_INFO *cs= field->charset();
if (!(field->type() == MYSQL_TYPE_STRING) ||
!(cs->state & MY_CS_BINSORT))
return TRUE;
@@ -1619,7 +1561,7 @@ bool check_part_func_fields(Field **ptr, bool ok_with_charsets)
*/
if (field_is_partition_charset(field))
{
- CHARSET_INFO *cs= ((Field_str*)field)->charset();
+ const CHARSET_INFO *cs= field->charset();
if (!ok_with_charsets ||
cs->mbmaxlen > 1 ||
cs->strxfrm_multiply > 1)
@@ -1808,7 +1750,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
(table->s->db_type()->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
check_unique_keys(table)))
goto end;
- if (unlikely(set_up_partition_bitmap(thd, part_info)))
+ if (unlikely(set_up_partition_bitmaps(thd, part_info)))
goto end;
if (unlikely(part_info->set_up_charset_field_preps()))
{
@@ -1824,6 +1766,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
set_up_partition_key_maps(table, part_info);
set_up_partition_func_pointers(part_info);
set_up_range_analysis_info(part_info);
+ table->file->set_part_info(part_info);
result= FALSE;
end:
thd->mark_used_columns= save_mark_used_columns;
@@ -2030,35 +1973,40 @@ void truncate_partition_filename(char *path)
/**
@brief Output a filepath. Similar to add_keyword_string except it
- also converts \ to / on Windows and skips the partition file name at
- the end if found.
-
- @note
-
- When Mysql sends a DATA DIRECTORY from SQL for partitions it does
- not use a file name, but it does for DATA DIRECTORY on a non-partitioned
- table. So when the storage engine is asked for the DATA DIRECTORY string
- after a restart through Handler::update_create_options(), the storage
- engine may include the filename.
+also converts \ to / on Windows and skips the partition file name at
+the end if found.
+
+ @note When Mysql sends a DATA DIRECTORY from SQL for partitions it does
+not use a file name, but it does for DATA DIRECTORY on a non-partitioned
+table. So when the storage engine is asked for the DATA DIRECTORY string
+after a restart through Handler::update_create_options(), the storage
+engine may include the filename.
*/
-
static int add_keyword_path(File fptr, const char *keyword,
const char *path)
{
- char temp_path[FN_REFLEN];
int err= add_string(fptr, keyword);
err+= add_space(fptr);
err+= add_equal(fptr);
err+= add_space(fptr);
- strmake(temp_path, path, sizeof(temp_path)-1);
+ char temp_path[FN_REFLEN];
+ strcpy(temp_path, path);
+#ifdef __WIN__
/* Convert \ to / to be able to create table on unix */
- to_unix_path(temp_path);
+ char *pos, *end;
+ uint length= strlen(temp_path);
+ for (pos= temp_path, end= pos+length ; pos < end ; pos++)
+ {
+ if (*pos == '\\')
+ *pos = '/';
+ }
+#endif
/*
- If the partition file name with its "#P#" identifier
- is found after the last slash, truncate that filename.
+ If the partition file name with its "#P#" identifier
+ is found after the last slash, truncate that filename.
*/
truncate_partition_filename(temp_path);
@@ -2067,9 +2015,8 @@ static int add_keyword_path(File fptr, const char *keyword,
return err + add_space(fptr);
}
-
static int add_keyword_string(File fptr, const char *keyword,
- bool should_use_quotes,
+ bool should_use_quotes,
const char *keystr)
{
int err= add_string(fptr, keyword);
@@ -2269,7 +2216,7 @@ static int add_column_list_values(File fptr, partition_info *part_info,
else
{
String *res;
- CHARSET_INFO *field_cs;
+ const CHARSET_INFO *field_cs;
bool need_cs_check= FALSE;
Item_result result_type= STRING_RESULT;
@@ -2728,7 +2675,7 @@ static inline int part_val_int(Item *item_expr, longlong *result)
We have a set of support functions for these 14 variants. There are 4
variants of hash functions and there is a function for each. The KEY
- partitioning uses the function calculate_key_value to calculate the hash
+ partitioning uses the function calculate_key_hash_value to calculate the hash
value based on an array of fields. The linear hash variants uses the
method get_part_id_from_linear_hash to get the partition id using the
hash value and some parameters calculated from the number of partitions.
@@ -2850,20 +2797,20 @@ static int get_part_id_linear_hash(partition_info *part_info,
}
-/*
+/**
Calculate part_id for (SUB)PARTITION BY KEY
- SYNOPSIS
- get_part_id_key()
- field_array Array of fields for PARTTION KEY
- num_parts Number of KEY partitions
+ @param file Handler to storage engine
+ @param field_array Array of fields for PARTTION KEY
+ @param num_parts Number of KEY partitions
+ @param func_value[out] Returns calculated hash value
- RETURN VALUE
- Calculated partition id
+ @return Calculated partition id
*/
inline
-static uint32 get_part_id_key(Field **field_array,
+static uint32 get_part_id_key(handler *file,
+ Field **field_array,
uint num_parts,
longlong *func_value)
{
@@ -2931,7 +2878,7 @@ static void copy_to_part_field_buffers(Field **ptr,
restore_ptr++;
if (!field->maybe_null() || !field->is_null())
{
- CHARSET_INFO *cs= ((Field_str*)field)->charset();
+ const CHARSET_INFO *cs= field->charset();
uint max_len= field->pack_length();
uint data_len= field->data_length();
uchar *field_buf= *field_bufs;
@@ -3583,7 +3530,8 @@ int get_partition_id_key_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
- *part_id= get_part_id_key(part_info->part_field_array,
+ *part_id= get_part_id_key(part_info->table->file,
+ part_info->part_field_array,
part_info->num_parts, func_value);
return 0;
}
@@ -3673,7 +3621,8 @@ int get_partition_id_key_sub(partition_info *part_info,
uint32 *part_id)
{
longlong func_value;
- *part_id= get_part_id_key(part_info->subpart_field_array,
+ *part_id= get_part_id_key(part_info->table->file,
+ part_info->subpart_field_array,
part_info->num_subparts, &func_value);
return FALSE;
}
@@ -4298,9 +4247,11 @@ bool mysql_unpack_partition(THD *thd,
{
bool result= TRUE;
partition_info *part_info;
- CHARSET_INFO *old_character_set_client= thd->variables.character_set_client;
+ const CHARSET_INFO *old_character_set_client=
+ thd->variables.character_set_client;
LEX *old_lex= thd->lex;
LEX lex;
+ PSI_statement_locker *parent_locker= thd->m_statement_psi;
DBUG_ENTER("mysql_unpack_partition");
thd->variables.character_set_client= system_charset_info;
@@ -4330,12 +4281,16 @@ bool mysql_unpack_partition(THD *thd,
}
part_info= lex.part_info;
DBUG_PRINT("info", ("Parse: %s", part_buf));
+
+ thd->m_statement_psi= NULL;
if (parse_sql(thd, & parser_state, NULL) ||
part_info->fix_parser_data(thd))
{
thd->free_items();
+ thd->m_statement_psi= parent_locker;
goto end;
}
+ thd->m_statement_psi= parent_locker;
/*
The parsed syntax residing in the frm file can still contain defaults.
The reason is that the frm file is sometimes saved outside of this
@@ -4672,7 +4627,7 @@ bool compare_partition_options(HA_CREATE_INFO *table_create_info,
}
-/**
+/*
Prepare for ALTER TABLE of partition structure
@param[in] thd Thread object
@@ -4704,7 +4659,6 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
bool *partition_changed,
bool *fast_alter_table)
{
- TABLE *new_table= NULL;
DBUG_ENTER("prep_alter_part_table");
/* Foreign keys on partitioned tables are not supported, waits for WL#148 */
@@ -4764,15 +4718,21 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
alter_ctx->table_name,
MDL_INTENTION_EXCLUSIVE));
- new_table->use_all_columns();
-
- tab_part_info= new_table->part_info;
+ tab_part_info= table->part_info;
if (alter_info->flags & Alter_info::ALTER_TABLE_REORG)
{
uint new_part_no, curr_part_no;
+ /*
+ 'ALTER TABLE t REORG PARTITION' only allowed with auto partition
+ if default partitioning is used.
+ */
+
if (tab_part_info->part_type != HASH_PARTITION ||
- tab_part_info->use_default_num_partitions)
+ ((table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) &&
+ !tab_part_info->use_default_num_partitions) ||
+ ((!(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)) &&
+ tab_part_info->use_default_num_partitions))
{
my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
goto err;
@@ -4786,7 +4746,23 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
after the change as before. Thus we can reply ok immediately
without any changes at all.
*/
- *fast_alter_table= true;
+ flags= table->file->alter_table_flags(alter_info->flags);
+ if (flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE))
+ {
+ *fast_alter_table= true;
+ /* Force table re-open for consistency with the main case. */
+ table->m_needs_reopen= true;
+ }
+ else
+ {
+ /*
+ Create copy of partition_info to avoid modifying original
+ TABLE::part_info, to keep it safe for later use.
+ */
+ if (!(tab_part_info= tab_part_info->get_clone()))
+ DBUG_RETURN(TRUE);
+ }
+
thd->work_part_info= tab_part_info;
DBUG_RETURN(FALSE);
}
@@ -4814,6 +4790,30 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
goto err;
}
+ if ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0)
+ {
+ /*
+ "Fast" change of partitioning is supported in this case.
+ We will change TABLE::part_info (as this is how we pass
+ information to storage engine in this case), so the table
+ must be reopened.
+ */
+ *fast_alter_table= true;
+ table->m_needs_reopen= true;
+ }
+ else
+ {
+ /*
+ "Fast" changing of partitioning is not supported. Create
+ a copy of TABLE::part_info object, so we can modify it safely.
+ Modifying original TABLE::part_info will cause problems when
+ we read data from old version of table using this TABLE object
+ while copying them to new version of table.
+ */
+ if (!(tab_part_info= tab_part_info->get_clone()))
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_PRINT("info", ("*fast_alter_table flags: 0x%x", flags));
if ((alter_info->flags & Alter_info::ALTER_ADD_PARTITION) ||
(alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION))
{
@@ -5349,6 +5349,8 @@ state of p1.
alt_part_info->subpart_type= tab_part_info->subpart_type;
alt_part_info->num_subparts= tab_part_info->num_subparts;
DBUG_ASSERT(!alt_part_info->use_default_partitions);
+ /* We specified partitions explicitly so don't use defaults anymore. */
+ tab_part_info->use_default_partitions= FALSE;
if (alt_part_info->set_up_defaults_for_partitioning(table->file,
ULL(0),
0))
@@ -5572,7 +5574,9 @@ the generated partition syntax in a correct manner.
There was no partitioning before and no partitioning defined.
Obviously no work needed.
*/
- if (table->part_info)
+ partition_info *tab_part_info= table->part_info;
+
+ if (tab_part_info)
{
if (alter_info->flags & Alter_info::ALTER_REMOVE_PARTITIONING)
{
@@ -5580,7 +5584,7 @@ the generated partition syntax in a correct manner.
if (!(create_info->used_fields & HA_CREATE_USED_ENGINE))
{
DBUG_PRINT("info", ("No explicit engine used"));
- create_info->db_type= table->part_info->default_engine_type;
+ create_info->db_type= tab_part_info->default_engine_type;
}
DBUG_PRINT("info", ("New engine type: %s",
ha_resolve_storage_engine_name(create_info->db_type)));
@@ -5592,16 +5596,20 @@ the generated partition syntax in a correct manner.
/*
Retain partitioning but possibly with a new storage engine
beneath.
+
+ Create a copy of TABLE::part_info to be able to modify it freely.
*/
- thd->work_part_info= table->part_info;
+ if (!(tab_part_info= tab_part_info->get_clone()))
+ DBUG_RETURN(TRUE);
+ thd->work_part_info= tab_part_info;
if (create_info->used_fields & HA_CREATE_USED_ENGINE &&
- create_info->db_type != table->part_info->default_engine_type)
+ create_info->db_type != tab_part_info->default_engine_type)
{
/*
Make sure change of engine happens to all partitions.
*/
DBUG_PRINT("info", ("partition changed"));
- if (table->part_info->is_auto_partitioned)
+ if (tab_part_info->is_auto_partitioned)
{
/*
If the user originally didn't specify partitioning to be
@@ -5629,7 +5637,7 @@ the generated partition syntax in a correct manner.
Need to cater for engine types that can handle partition without
using the partition handler.
*/
- if (thd->work_part_info != table->part_info)
+ if (thd->work_part_info != tab_part_info)
{
DBUG_PRINT("info", ("partition changed"));
*partition_changed= TRUE;
@@ -5646,8 +5654,8 @@ the generated partition syntax in a correct manner.
part_info->default_engine_type= create_info->db_type;
else
{
- if (table->part_info)
- part_info->default_engine_type= table->part_info->default_engine_type;
+ if (tab_part_info)
+ part_info->default_engine_type= tab_part_info->default_engine_type;
else
part_info->default_engine_type= create_info->db_type;
}
@@ -5708,9 +5716,7 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
build_table_filename(path, sizeof(path) - 1, lpt->db, lpt->table_name, "", 0);
-
- /* Disable transactions for all new tables */
- if (mysql_trans_prepare_alter_copy_data(thd))
+ if(mysql_trans_prepare_alter_copy_data(thd))
DBUG_RETURN(TRUE);
/* TODO: test if bulk_insert would increase the performance */
@@ -5725,7 +5731,6 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
if (mysql_trans_commit_alter_copy_data(thd))
error= 1; /* The error has been reported */
-
DBUG_RETURN(test(error));
}
@@ -6382,7 +6387,8 @@ static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
if (write_log_changed_partitions(lpt, &next_entry, (const char*)path))
goto error;
if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
- lpt->alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION))
+ lpt->alter_info->flags &
+ Alter_info::ALTER_REORGANIZE_PARTITION))
goto error;
if (write_log_replace_delete_frm(lpt, next_entry, shadow_path, path, TRUE))
goto error;
@@ -6574,7 +6580,6 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
part_info= lpt->part_info->get_clone();
close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL);
}
-
else
{
err_exclusive_lock:
@@ -6743,6 +6748,7 @@ static void downgrade_mdl_if_lock_tables_mode(THD *thd, MDL_ticket *ticket,
@param table_list List of the table involved
@param db Database name of new table
@param table_name Table name of new table
+
@return Operation status
@retval TRUE Error
@retval FALSE Success
@@ -6874,7 +6880,6 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
before any other threads are started, so there are no locking issues).
4) Close the table that have already been opened but didn't stumble on
the abort locked previously. This is done as part of the
- the abort locked previously. This is done as part of the
alter_close_table call.
5) Write the bin log
Unfortunately the writing of the binlog is not synchronised with
@@ -7048,10 +7053,10 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
use a lower lock level. This can be handled inside store_lock in the
respective handler.
- 0) Write an entry that removes the shadow frm file if crash occurs
- 1) Write the shadow frm file of new partitioning
+ 0) Write an entry that removes the shadow frm file if crash occurs.
+ 1) Write the shadow frm file of new partitioning.
2) Log such that temporary partitions added in change phase are
- removed in a crash situation
+ removed in a crash situation.
3) Add the new partitions.
Copy from the reorganised partitions to the new partitions.
4) Get an exclusive metadata lock on the table (waits for all active
@@ -7069,7 +7074,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
10) Install the shadow frm file.
11) Reopen the table if under lock tables.
12) Complete query.
- */
+ */
if (write_log_drop_shadow_frm(lpt) ||
ERROR_INJECT_CRASH("crash_change_partition_1") ||
ERROR_INJECT_ERROR("fail_change_partition_1") ||
@@ -8075,8 +8080,7 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
while (part_iter->field_vals.cur != part_iter->field_vals.end)
{
longlong dummy;
- field->store(part_iter->field_vals.cur++,
- ((Field_num*)field)->unsigned_flag);
+ field->store(part_iter->field_vals.cur++, field->flags & UNSIGNED_FLAG);
if ((part_iter->part_info->is_sub_partitioned() &&
!part_iter->part_info->get_part_partition_id(part_iter->part_info,
&part_id, &dummy)) ||
@@ -8100,12 +8104,11 @@ static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
part_iter->field_vals.cur= part_iter->field_vals.start;
return NOT_A_PARTITION_ID;
}
- field->store(part_iter->field_vals.cur++, FALSE);
+ field->store(part_iter->field_vals.cur++, field->flags & UNSIGNED_FLAG);
if (part_iter->part_info->get_subpartition_id(part_iter->part_info,
&res))
return NOT_A_PARTITION_ID;
return res;
-
}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 626344ef93d..01e62d70151 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -109,7 +109,8 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p,
tmp_name[name_len]= 0;
conv_name= tmp_name;
}
- res= strconvert(&my_charset_filename, conv_name, system_charset_info,
+ res= strconvert(&my_charset_filename, conv_name, name_len,
+ system_charset_info,
conv_string, FN_REFLEN, &errors);
if (!res || errors)
{
@@ -407,7 +408,7 @@ uint filename_to_tablename(const char *from, char *to, uint to_length
}
else
{
- res= strconvert(&my_charset_filename, from,
+ res= strconvert(&my_charset_filename, from, FN_REFLEN,
system_charset_info, to, to_length, &errors);
if (errors) // Old 5.0 name
{
@@ -508,7 +509,7 @@ uint tablename_to_filename(const char *from, char *to, uint to_length)
}
DBUG_RETURN(length);
}
- length= strconvert(system_charset_info, from,
+ length= strconvert(system_charset_info, from, FN_REFLEN,
&my_charset_filename, to, to_length, &errors);
if (check_if_legal_tablename(to) &&
length + 4 < to_length)
@@ -564,7 +565,7 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db,
db, table_name, ext, flags));
if (flags & FN_IS_TMP) // FN_FROM_IS_TMP | FN_TO_IS_TMP
- strnmov(tbbuff, table_name, sizeof(tbbuff));
+ strmake(tbbuff, table_name, sizeof(tbbuff)-1);
else
(void) tablename_to_filename(table_name, tbbuff, sizeof(tbbuff));
@@ -579,8 +580,11 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db,
pos= strnmov(pos, FN_ROOTDIR, end - pos);
pos= strxnmov(pos, end - pos, dbbuff, FN_ROOTDIR, NullS);
#ifdef USE_SYMDIR
- unpack_dirname(buff, buff);
- pos= strend(buff);
+ if (!(flags & SKIP_SYMDIR_ACCESS))
+ {
+ unpack_dirname(buff, buff);
+ pos= strend(buff);
+ }
#endif
pos= strxnmov(pos, end - pos, tbbuff, ext, NullS);
@@ -687,14 +691,14 @@ mysql_mutex_t LOCK_gdl;
#define DDL_LOG_NAME_LEN_POS 4
#define DDL_LOG_IO_SIZE_POS 8
-/*
- Read one entry from ddl log file
- SYNOPSIS
- read_ddl_log_file_entry()
- entry_no Entry number to read
- RETURN VALUES
- TRUE Error
- FALSE Success
+/**
+ Read one entry from ddl log file.
+
+ @param entry_no Entry number to read
+
+ @return Operation status
+ @retval true Error
+ @retval false Success
*/
static bool read_ddl_log_file_entry(uint entry_no)
@@ -713,14 +717,14 @@ static bool read_ddl_log_file_entry(uint entry_no)
}
-/*
- Write one entry from ddl log file
- SYNOPSIS
- write_ddl_log_file_entry()
- entry_no Entry number to write
- RETURN VALUES
- TRUE Error
- FALSE Success
+/**
+ Write one entry to ddl log file.
+
+ @param entry_no Entry number to write
+
+ @return Operation status
+ @retval true Error
+ @retval false Success
*/
static bool write_ddl_log_file_entry(uint entry_no)
@@ -731,7 +735,7 @@ static bool write_ddl_log_file_entry(uint entry_no)
DBUG_ENTER("write_ddl_log_file_entry");
mysql_mutex_assert_owner(&LOCK_gdl);
- if (mysql_file_pwrite(file_id, (uchar*)file_entry_buf,
+ if (mysql_file_pwrite(file_id, file_entry_buf,
IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE)
error= TRUE;
DBUG_RETURN(error);
@@ -769,7 +773,7 @@ static bool write_ddl_log_header()
int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
global_ddl_log.num_entries);
- const_var= FN_LEN;
+ const_var= FN_REFLEN;
int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS],
(ulong) const_var);
const_var= IO_SIZE;
@@ -784,13 +788,9 @@ static bool write_ddl_log_header()
}
-/*
- Create ddl log file name
- SYNOPSIS
- create_ddl_log_file_name()
- file_name Filename setup
- RETURN VALUES
- NONE
+/**
+ Create ddl log file name.
+ @param file_name Filename setup
*/
static inline void create_ddl_log_file_name(char *file_name)
@@ -799,17 +799,14 @@ static inline void create_ddl_log_file_name(char *file_name)
}
-/*
- Read header of ddl log file
- SYNOPSIS
- read_ddl_log_header()
- RETURN VALUES
- > 0 Last entry in ddl log
- 0 No entries in ddl log
- DESCRIPTION
- When we read the ddl log header we get information about maximum sizes
- of names in the ddl log and we also get information about the number
- of entries in the ddl log.
+/**
+ Read header of ddl log file.
+
+ When we read the ddl log header we get information about maximum sizes
+ of names in the ddl log and we also get information about the number
+ of entries in the ddl log.
+
+ @return Last entry in ddl log (0 if no entries)
*/
static uint read_ddl_log_header()
@@ -820,6 +817,8 @@ static uint read_ddl_log_header()
bool successful_open= FALSE;
DBUG_ENTER("read_ddl_log_header");
+ mysql_mutex_init(key_LOCK_gdl, &LOCK_gdl, MY_MUTEX_INIT_SLOW);
+ mysql_mutex_lock(&LOCK_gdl);
create_ddl_log_file_name(file_name);
if ((global_ddl_log.file_id= mysql_file_open(key_file_global_ddl_log,
file_name,
@@ -848,36 +847,72 @@ static uint read_ddl_log_header()
global_ddl_log.first_free= NULL;
global_ddl_log.first_used= NULL;
global_ddl_log.num_entries= 0;
- mysql_mutex_init(key_LOCK_gdl, &LOCK_gdl, MY_MUTEX_INIT_FAST);
global_ddl_log.do_release= true;
+ mysql_mutex_unlock(&LOCK_gdl);
DBUG_RETURN(entry_no);
}
-/*
- Read a ddl log entry
- SYNOPSIS
- read_ddl_log_entry()
- read_entry Number of entry to read
- out:entry_info Information from entry
- RETURN VALUES
- TRUE Error
- FALSE Success
- DESCRIPTION
- Read a specified entry in the ddl log
+/**
+ Convert from ddl_log_entry struct to file_entry_buf binary blob.
+
+ @param ddl_log_entry filled in ddl_log_entry struct.
*/
-bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
+static void set_global_from_ddl_log_entry(const DDL_LOG_ENTRY *ddl_log_entry)
{
- char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf;
+ mysql_mutex_assert_owner(&LOCK_gdl);
+ global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
+ (char)DDL_LOG_ENTRY_CODE;
+ global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
+ (char)ddl_log_entry->action_type;
+ global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
+ ddl_log_entry->next_entry);
+ DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_REFLEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
+ ddl_log_entry->name, FN_REFLEN - 1);
+ if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION ||
+ ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION ||
+ ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION)
+ {
+ DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_REFLEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN],
+ ddl_log_entry->from_name, FN_REFLEN - 1);
+ }
+ else
+ global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN]= 0;
+ DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_REFLEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_REFLEN)],
+ ddl_log_entry->handler_name, FN_REFLEN - 1);
+ if (ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION)
+ {
+ DBUG_ASSERT(strlen(ddl_log_entry->tmp_name) < FN_REFLEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (3*FN_REFLEN)],
+ ddl_log_entry->tmp_name, FN_REFLEN - 1);
+ }
+ else
+ global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (3*FN_REFLEN)]= 0;
+}
+
+
+/**
+ Convert from file_entry_buf binary blob to ddl_log_entry struct.
+
+ @param[out] ddl_log_entry struct to fill in.
+
+ @note Strings (names) are pointing to the global_ddl_log structure,
+ so LOCK_gdl needs to be hold until they are read or copied.
+*/
+
+static void set_ddl_log_entry_from_global(DDL_LOG_ENTRY *ddl_log_entry,
+ const uint read_entry)
+{
+ char *file_entry_buf= (char*) global_ddl_log.file_entry_buf;
uint inx;
uchar single_char;
- DBUG_ENTER("read_ddl_log_entry");
- if (read_ddl_log_file_entry(read_entry))
- {
- DBUG_RETURN(TRUE);
- }
+ mysql_mutex_assert_owner(&LOCK_gdl);
ddl_log_entry->entry_pos= read_entry;
single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS];
ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char;
@@ -890,22 +925,51 @@ bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
ddl_log_entry->from_name= &file_entry_buf[inx];
inx+= global_ddl_log.name_len;
ddl_log_entry->handler_name= &file_entry_buf[inx];
+ if (ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION)
+ {
+ inx+= global_ddl_log.name_len;
+ ddl_log_entry->tmp_name= &file_entry_buf[inx];
+ }
+ else
+ ddl_log_entry->tmp_name= NULL;
+}
+
+
+/**
+ Read a ddl log entry.
+
+ Read a specified entry in the ddl log.
+
+ @param read_entry Number of entry to read
+ @param[out] entry_info Information from entry
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
+*/
+
+static bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
+{
+ DBUG_ENTER("read_ddl_log_entry");
+
+ if (read_ddl_log_file_entry(read_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ set_ddl_log_entry_from_global(ddl_log_entry, read_entry);
DBUG_RETURN(FALSE);
}
-/*
- Initialise ddl log
- SYNOPSIS
- init_ddl_log()
+/**
+ Initialise ddl log.
- DESCRIPTION
- Write the header of the ddl log file and length of names. Also set
- number of entries to zero.
+ Write the header of the ddl log file and length of names. Also set
+ number of entries to zero.
- RETURN VALUES
- TRUE Error
- FALSE Success
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
static bool init_ddl_log()
@@ -917,7 +981,7 @@ static bool init_ddl_log()
goto end;
global_ddl_log.io_size= IO_SIZE;
- global_ddl_log.name_len= FN_LEN;
+ global_ddl_log.name_len= FN_REFLEN;
create_ddl_log_file_name(file_name);
if ((global_ddl_log.file_id= mysql_file_create(key_file_global_ddl_log,
file_name, CREATE_MODE,
@@ -1043,14 +1107,14 @@ static bool deactivate_ddl_log_entry_no_lock(uint entry_no)
}
-/*
+/**
Execute one action in a ddl log entry
- SYNOPSIS
- execute_ddl_log_action()
- ddl_log_entry Information in action entry to execute
- RETURN VALUES
- TRUE Error
- FALSE Success
+
+ @param ddl_log_entry Information in action entry to execute
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
@@ -1068,6 +1132,7 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
handlerton *hton;
DBUG_ENTER("execute_ddl_log_action");
+ mysql_mutex_assert_owner(&LOCK_gdl);
if (ddl_log_entry->entry_type == DDL_IGNORE_LOG_ENTRY_CODE)
{
DBUG_RETURN(FALSE);
@@ -1132,7 +1197,7 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
}
if ((deactivate_ddl_log_entry_no_lock(ddl_log_entry->entry_pos)))
break;
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
error= FALSE;
if (ddl_log_entry->action_type == DDL_LOG_DELETE_ACTION)
break;
@@ -1165,9 +1230,9 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
ddl_log_entry->name))
break;
}
- if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
+ if ((deactivate_ddl_log_entry_no_lock(ddl_log_entry->entry_pos)))
break;
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
error= FALSE;
break;
}
@@ -1234,14 +1299,14 @@ error:
}
-/*
+/**
Get a free entry in the ddl log
- SYNOPSIS
- get_free_ddl_log_entry()
- out:active_entry A ddl log memory entry returned
- RETURN VALUES
- TRUE Error
- FALSE Success
+
+ @param[out] active_entry A ddl log memory entry returned
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
@@ -1284,24 +1349,67 @@ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
}
+/**
+ Execute one entry in the ddl log.
+
+ Executing an entry means executing a linked list of actions.
+
+ @param first_entry Reference to first action in entry
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
+*/
+
+static bool execute_ddl_log_entry_no_lock(THD *thd, uint first_entry)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ uint read_entry= first_entry;
+ DBUG_ENTER("execute_ddl_log_entry_no_lock");
+
+ mysql_mutex_assert_owner(&LOCK_gdl);
+ do
+ {
+ if (read_ddl_log_entry(read_entry, &ddl_log_entry))
+ {
+ /* Write to error log and continue with next log entry */
+ sql_print_error("Failed to read entry = %u from ddl log",
+ read_entry);
+ break;
+ }
+ DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE ||
+ ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE);
+
+ if (execute_ddl_log_action(thd, &ddl_log_entry))
+ {
+ /* Write to error log and continue with next log entry */
+ sql_print_error("Failed to execute action for entry = %u from ddl log",
+ read_entry);
+ break;
+ }
+ read_entry= ddl_log_entry.next_entry;
+ } while (read_entry);
+ DBUG_RETURN(FALSE);
+}
+
+
/*
External interface methods for the DDL log Module
---------------------------------------------------
*/
-/*
- SYNOPSIS
- write_ddl_log_entry()
- ddl_log_entry Information about log entry
- out:entry_written Entry information written into
+/**
+ Write a ddl log entry.
- RETURN VALUES
- TRUE Error
- FALSE Success
+ A careful write of the ddl log is performed to ensure that we can
+ handle crashes occurring during CREATE and ALTER TABLE processing.
- DESCRIPTION
- A careful write of the ddl log is performed to ensure that we can
- handle crashes occurring during CREATE and ALTER TABLE processing.
+ @param ddl_log_entry Information about log entry
+ @param[out] entry_written Entry information written into
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
@@ -1310,46 +1418,29 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
bool error, write_header;
DBUG_ENTER("write_ddl_log_entry");
+ mysql_mutex_assert_owner(&LOCK_gdl);
if (init_ddl_log())
{
DBUG_RETURN(TRUE);
}
- global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
- (char)DDL_LOG_ENTRY_CODE;
- global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
- (char)ddl_log_entry->action_type;
- global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0;
- int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
- ddl_log_entry->next_entry);
- DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_LEN);
- strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
- ddl_log_entry->name, FN_LEN - 1);
- if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION ||
- ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION)
- {
- DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_LEN);
- strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN],
- ddl_log_entry->from_name, FN_LEN - 1);
- }
- else
- global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
- DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_LEN);
- strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_LEN)],
- ddl_log_entry->handler_name, FN_LEN - 1);
+ set_global_from_ddl_log_entry(ddl_log_entry);
if (get_free_ddl_log_entry(active_entry, &write_header))
{
DBUG_RETURN(TRUE);
}
error= FALSE;
DBUG_PRINT("ddl_log",
- ("write type %c next %u name '%s' from_name '%s' handler '%s'",
+ ("write type %c next %u name '%s' from_name '%s' handler '%s'"
+ " tmp_name '%s'",
(char) global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS],
ddl_log_entry->next_entry,
(char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
(char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
- + FN_LEN],
+ + FN_REFLEN],
(char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
- + (2*FN_LEN)]));
+ + (2*FN_REFLEN)],
+ (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
+ + (3*FN_REFLEN)]));
if (write_ddl_log_file_entry((*active_entry)->entry_pos))
{
error= TRUE;
@@ -1358,7 +1449,7 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
}
if (write_header && !error)
{
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
if (write_ddl_log_header())
error= TRUE;
}
@@ -1368,31 +1459,30 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
}
-/*
- Write final entry in the ddl log
- SYNOPSIS
- write_execute_ddl_log_entry()
- first_entry First entry in linked list of entries
+/**
+ @brief Write final entry in the ddl log.
+
+  @details This is the last write in the ddl log. The previous log entries
+  have already been written but not yet synched to disk.
+  We write a couple of log entries that describe the actions to perform.
+  These entries are set up in a linked list, but they are only executed
+  once an execute entry has been put first in the list.
+  This routine writes that first execute entry.
+
+ @param first_entry First entry in linked list of entries
to execute, if 0 = NULL it means that
the entry is removed and the entries
are put into the free list.
- complete Flag indicating we are simply writing
+ @param complete Flag indicating we are simply writing
info about that entry has been completed
- in:out:active_entry Entry to execute, 0 = NULL if the entry
+ @param[in,out] active_entry Entry to execute, 0 = NULL if the entry
is written first time and needs to be
returned. In this case the entry written
is returned in this parameter
- RETURN VALUES
- TRUE Error
- FALSE Success
- DESCRIPTION
- This is the last write in the ddl log. The previous log entries have
- already been written but not yet synched to disk.
- We write a couple of log entries that describes action to perform.
- This entries are set-up in a linked list, however only when a first
- execute entry is put as the first entry these will be executed.
- This routine writes this first
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool write_execute_ddl_log_entry(uint first_entry,
@@ -1403,6 +1493,7 @@ bool write_execute_ddl_log_entry(uint first_entry,
char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
DBUG_ENTER("write_execute_ddl_log_entry");
+ mysql_mutex_assert_owner(&LOCK_gdl);
if (init_ddl_log())
{
DBUG_RETURN(TRUE);
@@ -1415,7 +1506,7 @@ bool write_execute_ddl_log_entry(uint first_entry,
any log entries before, we are only here to write the execute
entry to indicate it is done.
*/
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_LOG_EXECUTE_CODE;
}
else
@@ -1424,14 +1515,15 @@ bool write_execute_ddl_log_entry(uint first_entry,
file_entry_buf[DDL_LOG_PHASE_POS]= 0;
int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry);
file_entry_buf[DDL_LOG_NAME_POS]= 0;
- file_entry_buf[DDL_LOG_NAME_POS + FN_LEN]= 0;
- file_entry_buf[DDL_LOG_NAME_POS + 2*FN_LEN]= 0;
+ file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN]= 0;
+ file_entry_buf[DDL_LOG_NAME_POS + 2*FN_REFLEN]= 0;
if (!(*active_entry))
{
if (get_free_ddl_log_entry(active_entry, &write_header))
{
DBUG_RETURN(TRUE);
}
+ write_header= TRUE;
}
if (write_ddl_log_file_entry((*active_entry)->entry_pos))
{
@@ -1439,7 +1531,7 @@ bool write_execute_ddl_log_entry(uint first_entry,
release_ddl_log_memory_entry(*active_entry);
DBUG_RETURN(TRUE);
}
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
if (write_header)
{
if (write_ddl_log_header())
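
A minimal sketch of the entry-writing interface documented above (illustrative
only, not part of this patch): a hypothetical one-action chain is written and
synced while holding LOCK_gdl, as the new assertions require, and nothing
becomes executable until the execute entry is written last. The engine name
and file name below are invented for the sketch.

  DDL_LOG_ENTRY ddl_log_entry;
  DDL_LOG_MEMORY_ENTRY *log_entry= NULL;
  DDL_LOG_MEMORY_ENTRY *exec_log_entry= NULL;

  bzero(&ddl_log_entry, sizeof(ddl_log_entry));
  ddl_log_entry.action_type=  DDL_LOG_DELETE_ACTION;   /* one-phase action */
  ddl_log_entry.next_entry=   0;                        /* last action in chain */
  ddl_log_entry.handler_name= "example_engine";         /* hypothetical */
  ddl_log_entry.name=         "./test/t1_old";          /* hypothetical */

  mysql_mutex_lock(&LOCK_gdl);
  if (write_ddl_log_entry(&ddl_log_entry, &log_entry) ||
      write_execute_ddl_log_entry(log_entry->entry_pos, FALSE,
                                  &exec_log_entry))
  {
    /* Nothing was activated; recovery will simply skip the unused entries. */
  }
  mysql_mutex_unlock(&LOCK_gdl);
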
@@ -1452,106 +1544,54 @@ bool write_execute_ddl_log_entry(uint first_entry,
}
-/*
- For complex rename operations we need to deactivate individual entries.
- SYNOPSIS
- deactivate_ddl_log_entry()
- entry_no Entry position of record to change
- RETURN VALUES
- TRUE Error
- FALSE Success
- DESCRIPTION
- During replace operations where we start with an existing table called
- t1 and a replacement table called t1#temp or something else and where
- we want to delete t1 and rename t1#temp to t1 this is not possible to
- do in a safe manner unless the ddl log is informed of the phases in
- the change.
-
- Delete actions are 1-phase actions that can be ignored immediately after
- being executed.
- Rename actions from x to y is also a 1-phase action since there is no
- interaction with any other handlers named x and y.
- Replace action where drop y and x -> y happens needs to be a two-phase
- action. Thus the first phase will drop y and the second phase will
- rename x -> y.
+/**
+ Deactivate an individual entry.
+
+  @details See deactivate_ddl_log_entry_no_lock().
+
+ @param entry_no Entry position of record to change
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool deactivate_ddl_log_entry(uint entry_no)
{
- char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
+ bool error;
DBUG_ENTER("deactivate_ddl_log_entry");
- if (!read_ddl_log_file_entry(entry_no))
- {
- if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE)
- {
- if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_DELETE_ACTION ||
- file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_RENAME_ACTION ||
- (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION &&
- file_entry_buf[DDL_LOG_PHASE_POS] == 1))
- file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE;
- else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION)
- {
- DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] == 0);
- file_entry_buf[DDL_LOG_PHASE_POS]= 1;
- }
- else
- {
- DBUG_ASSERT(0);
- }
- if (write_ddl_log_file_entry(entry_no))
- {
- sql_print_error("Error in deactivating log entry. Position = %u",
- entry_no);
- DBUG_RETURN(TRUE);
- }
- }
- }
- else
- {
- sql_print_error("Failed in reading entry before deactivating it");
- DBUG_RETURN(TRUE);
- }
- DBUG_RETURN(FALSE);
+ mysql_mutex_lock(&LOCK_gdl);
+ error= deactivate_ddl_log_entry_no_lock(entry_no);
+ mysql_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(error);
}
-/*
- Sync ddl log file
- SYNOPSIS
- sync_ddl_log()
- RETURN VALUES
- TRUE Error
- FALSE Success
+/**
+ Sync ddl log file.
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool sync_ddl_log()
{
- bool error= FALSE;
+ bool error;
DBUG_ENTER("sync_ddl_log");
- if ((!global_ddl_log.recovery_phase) &&
- init_ddl_log())
- {
- DBUG_RETURN(TRUE);
- }
- if (mysql_file_sync(global_ddl_log.file_id, MYF(0)))
- {
- /* Write to error log */
- sql_print_error("Failed to sync ddl log");
- error= TRUE;
- }
+ mysql_mutex_lock(&LOCK_gdl);
+ error= sync_ddl_log_no_lock();
+ mysql_mutex_unlock(&LOCK_gdl);
+
DBUG_RETURN(error);
}
-/*
- Release a log memory entry
- SYNOPSIS
- release_ddl_log_memory_entry()
- log_memory_entry Log memory entry to release
- RETURN VALUES
- NONE
+/**
+ Release a log memory entry.
+  @param log_entry  Log memory entry to release
*/
void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
@@ -1561,6 +1601,7 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry;
DBUG_ENTER("release_ddl_log_memory_entry");
+ mysql_mutex_assert_owner(&LOCK_gdl);
global_ddl_log.first_free= log_entry;
log_entry->next_log_entry= first_free;
@@ -1574,56 +1615,32 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
}
-/*
- Execute one entry in the ddl log. Executing an entry means executing
- a linked list of actions.
- SYNOPSIS
- execute_ddl_log_entry()
- first_entry Reference to first action in entry
- RETURN VALUES
- TRUE Error
- FALSE Success
+/**
+ Execute one entry in the ddl log.
+
+ Executing an entry means executing a linked list of actions.
+
+  @param thd          Thread handle
+  @param first_entry  Reference to first action in entry
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool execute_ddl_log_entry(THD *thd, uint first_entry)
{
- DDL_LOG_ENTRY ddl_log_entry;
- uint read_entry= first_entry;
+ bool error;
DBUG_ENTER("execute_ddl_log_entry");
mysql_mutex_lock(&LOCK_gdl);
- do
- {
- if (read_ddl_log_entry(read_entry, &ddl_log_entry))
- {
- /* Write to error log and continue with next log entry */
- sql_print_error("Failed to read entry = %u from ddl log",
- read_entry);
- break;
- }
- DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE ||
- ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE);
-
- if (execute_ddl_log_action(thd, &ddl_log_entry))
- {
- /* Write to error log and continue with next log entry */
- sql_print_error("Failed to execute action for entry = %u from ddl log",
- read_entry);
- break;
- }
- read_entry= ddl_log_entry.next_entry;
- } while (read_entry);
+ error= execute_ddl_log_entry_no_lock(thd, first_entry);
mysql_mutex_unlock(&LOCK_gdl);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(error);
}
-/*
- Close the ddl log
- SYNOPSIS
- close_ddl_log()
- RETURN VALUES
- NONE
+/**
+ Close the ddl log.
*/
static void close_ddl_log()
@@ -1638,12 +1655,8 @@ static void close_ddl_log()
}
-/*
- Execute the ddl log at recovery of MySQL Server
- SYNOPSIS
- execute_ddl_log_recovery()
- RETURN VALUES
- NONE
+/**
+ Execute the ddl log at recovery of MySQL Server.
*/
void execute_ddl_log_recovery()
@@ -1676,6 +1689,7 @@ void execute_ddl_log_recovery()
/* this also initialize LOCK_gdl */
num_entries= read_ddl_log_header();
+ mysql_mutex_lock(&LOCK_gdl);
for (i= 1; i < num_entries + 1; i++)
{
if (read_ddl_log_entry(i, &ddl_log_entry))
@@ -1686,7 +1700,7 @@ void execute_ddl_log_recovery()
}
if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE)
{
- if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry))
+ if (execute_ddl_log_entry_no_lock(thd, ddl_log_entry.next_entry))
{
/* Real unpleasant scenario but we continue anyways. */
continue;
@@ -1697,6 +1711,7 @@ void execute_ddl_log_recovery()
create_ddl_log_file_name(file_name);
(void) mysql_file_delete(key_file_global_ddl_log, file_name, MYF(0));
global_ddl_log.recovery_phase= FALSE;
+ mysql_mutex_unlock(&LOCK_gdl);
delete thd;
/* Remember that we don't have a THD */
set_current_thd(0);
@@ -1704,24 +1719,22 @@ void execute_ddl_log_recovery()
}
-/*
- Release all memory allocated to the ddl log
- SYNOPSIS
- release_ddl_log()
- RETURN VALUES
- NONE
+/**
+ Release all memory allocated to the ddl log.
*/
void release_ddl_log()
{
- DDL_LOG_MEMORY_ENTRY *free_list= global_ddl_log.first_free;
- DDL_LOG_MEMORY_ENTRY *used_list= global_ddl_log.first_used;
+ DDL_LOG_MEMORY_ENTRY *free_list;
+ DDL_LOG_MEMORY_ENTRY *used_list;
DBUG_ENTER("release_ddl_log");
if (!global_ddl_log.do_release)
DBUG_VOID_RETURN;
mysql_mutex_lock(&LOCK_gdl);
+ free_list= global_ddl_log.first_free;
+ used_list= global_ddl_log.first_used;
while (used_list)
{
DDL_LOG_MEMORY_ENTRY *tmp= used_list->next_log_entry;
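
Continuing the sketch from above (again illustrative, not part of the patch):
once the DDL operation has succeeded, the execute entry is rewritten with
first_entry == 0 and the complete flag set, and the in-memory entries are
handed back while still holding LOCK_gdl, as release_ddl_log_memory_entry()
now asserts. If the server crashes before this point,
execute_ddl_log_recovery() replays the chain instead.

  mysql_mutex_lock(&LOCK_gdl);
  if (write_execute_ddl_log_entry(0, TRUE, &exec_log_entry))
    sql_print_error("Failed to mark ddl log execute entry as completed");
  release_ddl_log_memory_entry(log_entry);
  release_ddl_log_memory_entry(exec_log_entry);
  mysql_mutex_unlock(&LOCK_gdl);
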
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index 9603ca30cfa..b8126fab048 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -265,27 +265,22 @@ uint check_word(TYPELIB *lib, const char *val, const char *end,
*/
-uint strconvert(CHARSET_INFO *from_cs, const char *from,
+uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors)
{
int cnvres;
my_wc_t wc;
char *to_start= to;
uchar *to_end= (uchar*) to + to_length - 1;
+ const uchar *from_end= (const uchar*) from + from_length;
my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc;
my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb;
uint error_count= 0;
while (1)
{
- /*
- Using 'from + 10' is safe:
- - it is enough to scan a single character in any character set.
- - if remaining string is shorter than 10, then mb_wc will return
- with error because of unexpected '\0' character.
- */
if ((cnvres= (*mb_wc)(from_cs, &wc,
- (uchar*) from, (uchar*) from + 10)) > 0)
+ (uchar*) from, from_end)) > 0)
{
if (!wc)
break;
diff --git a/sql/strfunc.h b/sql/strfunc.h
index 57c5427fcd0..7b031710c76 100644
--- a/sql/strfunc.h
+++ b/sql/strfunc.h
@@ -43,7 +43,7 @@ char *set_to_string(THD *thd, LEX_STRING *result, ulonglong set,
/*
These functions were protected by INNODB_COMPATIBILITY_HOOKS
*/
-uint strconvert(CHARSET_INFO *from_cs, const char *from,
+uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors);
#endif /* STRFUNC_INCLUDED */
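
A small usage sketch for the new strconvert() signature (illustrative only,
not from the patch): the explicit from_length lets a caller convert a source
fragment that is not '\0' terminated, replacing the old "from + 10" scanning
trick removed above. The charsets, names and lengths below are assumptions.

  char to_buf[NAME_LEN + 1];
  uint errors;
  const char *frag= "t1#P#p0";     /* hypothetical partitioned file name */
  uint frag_len= 2;                /* convert only the leading "t1" part  */

  uint to_len= strconvert(&my_charset_filename, frag, frag_len,
                          system_charset_info, to_buf, sizeof(to_buf),
                          &errors);
  /* to_buf now holds the converted fragment; to_len is the number of
     bytes written to to_buf */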