Diffstat (limited to 'sql')
 -rw-r--r--  sql/field.cc                 |  10
 -rw-r--r--  sql/ha_ndbcluster_binlog.cc  | 135
 -rw-r--r--  sql/item_cmpfunc.cc          |  12
 -rw-r--r--  sql/item_row.cc              |  16
 -rw-r--r--  sql/item_row.h               |   3
 -rw-r--r--  sql/mysql_priv.h             |   2
 -rw-r--r--  sql/mysqld.cc                |  33
 -rw-r--r--  sql/opt_range.cc             |   2
 -rw-r--r--  sql/set_var.cc               |  39
 -rw-r--r--  sql/set_var.h                |  57
 -rw-r--r--  sql/share/errmsg.txt         |   2
 -rw-r--r--  sql/sql_class.cc             | 116
 -rw-r--r--  sql/sql_class.h              |  29
 -rw-r--r--  sql/sql_prepare.cc           |  17
 14 files changed, 333 insertions(+), 140 deletions(-)
diff --git a/sql/field.cc b/sql/field.cc
index 1176257359f..9c504f186b3 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -5989,7 +5989,7 @@ int Field_str::store(double nr)
uint Field::is_equal(create_field *new_field)
{
- return (new_field->sql_type == type());
+ return (new_field->sql_type == real_type());
}
@@ -6001,7 +6001,7 @@ uint Field_str::is_equal(create_field *new_field)
(flags & (BINCMP_FLAG | BINARY_FLAG))))
return 0; /* One of the fields is binary and the other one isn't */
- return ((new_field->sql_type == type()) &&
+ return ((new_field->sql_type == real_type()) &&
new_field->charset == field_charset &&
new_field->length == max_length());
}
@@ -6798,7 +6798,7 @@ Field *Field_varstring::new_key_field(MEM_ROOT *root,
uint Field_varstring::is_equal(create_field *new_field)
{
- if (new_field->sql_type == type() &&
+ if (new_field->sql_type == real_type() &&
new_field->charset == field_charset)
{
if (new_field->length == max_length())
@@ -7957,12 +7957,12 @@ bool Field_num::eq_def(Field *field)
uint Field_num::is_equal(create_field *new_field)
{
- return ((new_field->sql_type == type()) &&
+ return ((new_field->sql_type == real_type()) &&
((new_field->flags & UNSIGNED_FLAG) == (uint) (flags &
UNSIGNED_FLAG)) &&
((new_field->flags & AUTO_INCREMENT_FLAG) ==
(uint) (flags & AUTO_INCREMENT_FLAG)) &&
- (new_field->length >= max_length()));
+ (new_field->length <= max_length()));
}
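
The field.cc hunks above switch the is_equal() methods from comparing type() to comparing real_type(), and tighten the numeric length check. Below is a small standalone sketch, using hypothetical WireType/RealType enums rather than the real Field hierarchy, of why comparing only the handler-visible type can wrongly treat two different column kinds (for example an ENUM and a VARCHAR that share a string storage type) as equal:

  // Standalone sketch (hypothetical enums and structs, not the real Field
  // classes): two column kinds may share the same handler-visible type while
  // having different "real" types, so is_equal()-style checks must compare
  // the real type.
  #include <cassert>

  enum class WireType { String };          // what type() would report
  enum class RealType { VarChar, Enum };   // what real_type() would report

  struct ColumnDef {
    WireType wire;
    RealType real;
    unsigned length;
  };

  // Too-weak check: an ENUM and a VARCHAR can both report WireType::String.
  bool is_equal_by_wire(const ColumnDef &a, const ColumnDef &b)
  { return a.wire == b.wire && a.length == b.length; }

  // Stricter check mirroring the patch: compare the real type instead.
  bool is_equal_by_real(const ColumnDef &a, const ColumnDef &b)
  { return a.real == b.real && a.length == b.length; }

  int main()
  {
    ColumnDef varchar_col{WireType::String, RealType::VarChar, 10};
    ColumnDef enum_col{WireType::String, RealType::Enum, 10};
    assert(is_equal_by_wire(varchar_col, enum_col));    // false positive
    assert(!is_equal_by_real(varchar_col, enum_col));   // correctly distinct
    return 0;
  }
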
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 79e4fc790e0..ec5b5858f5c 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -1430,6 +1430,10 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
NDB_SHARE *share)
{
DBUG_ENTER("ndb_handle_schema_change");
+ TABLE* table= share->table;
+ TABLE_SHARE *table_share= table->s;
+ const char *dbname= table_share->db.str;
+ const char *tabname= table_share->table_name.str;
bool do_close_cached_tables= FALSE;
bool is_online_alter_table= FALSE;
bool is_rename_table= FALSE;
@@ -1449,70 +1453,68 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
}
}
- if (is_remote_change) /* includes CLUSTER_FAILURE */
+ /*
+ Refresh local dictionary cache by
+    invalidating the table and all its indexes
+ */
+ ndb->setDatabaseName(dbname);
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ DBUG_ASSERT(thd_ndb != NULL);
+ Ndb* old_ndb= thd_ndb->ndb;
+ thd_ndb->ndb= ndb;
+ ha_ndbcluster table_handler(table_share);
+ (void)strxmov(table_handler.m_dbname, dbname, NullS);
+ (void)strxmov(table_handler.m_tabname, tabname, NullS);
+ table_handler.open_indexes(ndb, table, TRUE);
+ table_handler.invalidate_dictionary_cache(TRUE);
+ thd_ndb->ndb= old_ndb;
+
+ /*
+ Refresh local frm file and dictionary cache if
+ remote on-line alter table
+ */
+ if (is_remote_change && is_online_alter_table)
{
- TABLE* table= share->table;
- TABLE_SHARE *table_share= table->s;
- const char *dbname= table_share->db.str;
+ const char *tabname= table_share->table_name.str;
+ char key[FN_REFLEN];
+ const void *data= 0, *pack_data= 0;
+ uint length, pack_length;
+ int error;
+ NDBDICT *dict= ndb->getDictionary();
+ const NDBTAB *altered_table= pOp->getTable();
- /*
- Invalidate table and all it's indexes
+ DBUG_PRINT("info", ("Detected frm change of table %s.%s",
+ dbname, tabname));
+ build_table_filename(key, FN_LEN-1, dbname, tabname, NullS);
+ /*
+ If the frm of the altered table is different than the one on
+ disk then overwrite it with the new table definition
*/
- ndb->setDatabaseName(dbname);
- Thd_ndb *thd_ndb= get_thd_ndb(thd);
- DBUG_ASSERT(thd_ndb != NULL);
- Ndb* old_ndb= thd_ndb->ndb;
- thd_ndb->ndb= ndb;
- ha_ndbcluster table_handler(table_share);
- table_handler.set_dbname(share->key);
- table_handler.set_tabname(share->key);
- table_handler.open_indexes(ndb, table, TRUE);
- table_handler.invalidate_dictionary_cache(TRUE);
- thd_ndb->ndb= old_ndb;
-
- if (is_online_alter_table)
- {
- const char *tabname= table_share->table_name.str;
- char key[FN_REFLEN];
- const void *data= 0, *pack_data= 0;
- uint length, pack_length;
- int error;
- NDBDICT *dict= ndb->getDictionary();
- const NDBTAB *altered_table= pOp->getTable();
-
- DBUG_PRINT("info", ("Detected frm change of table %s.%s",
- dbname, tabname));
- build_table_filename(key, FN_LEN-1, dbname, tabname, NullS);
- /*
- If the frm of the altered table is different than the one on
- disk then overwrite it with the new table definition
- */
- if (readfrm(key, &data, &length) == 0 &&
- packfrm(data, length, &pack_data, &pack_length) == 0 &&
- cmp_frm(altered_table, pack_data, pack_length))
+ if (readfrm(key, &data, &length) == 0 &&
+ packfrm(data, length, &pack_data, &pack_length) == 0 &&
+ cmp_frm(altered_table, pack_data, pack_length))
+ {
+ DBUG_DUMP("frm", (char*)altered_table->getFrmData(),
+ altered_table->getFrmLength());
+ pthread_mutex_lock(&LOCK_open);
+ const NDBTAB *old= dict->getTable(tabname);
+      if (!old ||
+ old->getObjectVersion() != altered_table->getObjectVersion())
+ dict->putTable(altered_table);
+
+ if ((error= unpackfrm(&data, &length, altered_table->getFrmData())) ||
+ (error= writefrm(key, data, length)))
{
- DBUG_DUMP("frm", (char*)altered_table->getFrmData(),
- altered_table->getFrmLength());
- pthread_mutex_lock(&LOCK_open);
- const NDBTAB *old= dict->getTable(tabname);
- if (!old &&
- old->getObjectVersion() != altered_table->getObjectVersion())
- dict->putTable(altered_table);
-
- if ((error= unpackfrm(&data, &length, altered_table->getFrmData())) ||
- (error= writefrm(key, data, length)))
- {
- sql_print_information("NDB: Failed write frm for %s.%s, error %d",
- dbname, tabname, error);
- }
- ndbcluster_binlog_close_table(thd, share);
- close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
- if ((error= ndbcluster_binlog_open_table(thd, share,
- table_share, table)))
- sql_print_information("NDB: Failed to re-open table %s.%s",
- dbname, tabname);
- pthread_mutex_unlock(&LOCK_open);
+ sql_print_information("NDB: Failed write frm for %s.%s, error %d",
+ dbname, tabname, error);
}
+ ndbcluster_binlog_close_table(thd, share);
+ close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, TRUE);
+ if ((error= ndbcluster_binlog_open_table(thd, share,
+ table_share, table)))
+ sql_print_information("NDB: Failed to re-open table %s.%s",
+ dbname, tabname);
+ pthread_mutex_unlock(&LOCK_open);
}
}
@@ -1540,6 +1542,21 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp,
share->table->s->db.length= strlen(share->db);
share->table->s->table_name.str= share->table_name;
share->table->s->table_name.length= strlen(share->table_name);
+ /*
+ Refresh local dictionary cache by invalidating any
+      old table with the same name and all its indexes
+ */
+ ndb->setDatabaseName(dbname);
+ Thd_ndb *thd_ndb= get_thd_ndb(thd);
+ DBUG_ASSERT(thd_ndb != NULL);
+ Ndb* old_ndb= thd_ndb->ndb;
+ thd_ndb->ndb= ndb;
+ ha_ndbcluster table_handler(table_share);
+ table_handler.set_dbname(share->key);
+ table_handler.set_tabname(share->key);
+ table_handler.open_indexes(ndb, table, TRUE);
+ table_handler.invalidate_dictionary_cache(TRUE);
+ thd_ndb->ndb= old_ndb;
}
DBUG_ASSERT(share->op == pOp || share->op_old == pOp);
if (share->op_old == pOp)
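
The reorganized ndb_handle_schema_change() above hoists the dictionary-cache invalidation out of the is_remote_change branch so it runs for every schema-change event, while the frm refresh stays conditional on a remote online ALTER TABLE. The invalidation relies on a save/swap/restore of the per-thread Ndb handle; a minimal sketch of that pattern, with stand-in types rather than the real Ndb/Thd_ndb classes, looks like this:

  // Minimal sketch (stand-in types, hypothetical helper) of the
  // save/swap/restore pattern used above: the thread-local state is pointed
  // at the event thread's Ndb object while the cache helpers run, then
  // restored so the connection keeps using its own Ndb afterwards.
  struct Ndb {};                        // stand-in for the real Ndb class
  struct Thd_ndb { Ndb *ndb; };         // stand-in for the per-thread state

  void refresh_dictionary_cache(Thd_ndb *thd_ndb, Ndb *event_ndb)
  {
    Ndb *old_ndb = thd_ndb->ndb;        // remember the connection's own Ndb
    thd_ndb->ndb = event_ndb;           // helpers now see the event Ndb
    // ... open_indexes() / invalidate_dictionary_cache() would run here ...
    thd_ndb->ndb = old_ndb;             // always restore before returning
  }
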
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 493c3dbc60e..acee912c912 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -52,7 +52,6 @@ static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems)
{
uint i;
Field *field= NULL;
- bool all_constant= TRUE;
/* If the first argument is a FIELD_ITEM, pull out the field. */
if (items[0]->real_item()->type() == Item::FIELD_ITEM)
@@ -65,16 +64,9 @@ static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems)
for (i= 1; i < nitems; i++)
{
type[0]= item_cmp_type(type[0], items[i]->result_type());
- if (field && !convert_constant_item(thd, field, &items[i]))
- all_constant= FALSE;
+ if (field && convert_constant_item(thd, field, &items[i]))
+ type[0]= INT_RESULT;
}
-
- /*
- If we had a field that can be compared as a longlong, and all constant
- items, then the aggregate result will be an INT_RESULT.
- */
- if (field && all_constant)
- type[0]= INT_RESULT;
}
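
The agg_cmp_type() change above drops the all_constant bookkeeping: instead of promoting the comparison to INT_RESULT only when every argument is a constant that converts against the field, the promotion now happens per argument, as soon as one of them converts. A heavily simplified sketch of that rule, with a stand-in for item_cmp_type() and none of the Item machinery, is:

  // Heavily simplified sketch (hypothetical types, no Item machinery) of the
  // aggregation rule after the change: any argument that converts into a
  // constant comparable with the field promotes the running type to
  // INT_RESULT at that step; the old code required all of them to convert.
  #include <vector>

  enum Result_type { STRING_RESULT, INT_RESULT };

  // Stand-in for item_cmp_type(): two ints compare as ints, else as strings.
  static Result_type cmp_type(Result_type a, Result_type b)
  { return (a == INT_RESULT && b == INT_RESULT) ? INT_RESULT : STRING_RESULT; }

  struct Arg { Result_type type; bool converts_against_field; };

  // args mirrors items[] and is assumed non-empty (items[0] is the field).
  Result_type agg_cmp_type_sketch(bool have_field, const std::vector<Arg> &args)
  {
    Result_type agg = args[0].type;
    for (size_t i = 1; i < args.size(); i++)
    {
      agg = cmp_type(agg, args[i].type);
      if (have_field && args[i].converts_against_field)
        agg = INT_RESULT;               // per-argument promotion (new behaviour)
    }
    return agg;
  }
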
diff --git a/sql/item_row.cc b/sql/item_row.cc
index 75c3f8a2922..f5c8d511025 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -26,7 +26,7 @@
*/
Item_row::Item_row(List<Item> &arg):
- Item(), used_tables_cache(0), array_holder(1), const_item_cache(1), with_null(0)
+ Item(), used_tables_cache(0), const_item_cache(1), with_null(0)
{
//TODO: think placing 2-3 component items in item (as it done for function)
@@ -85,6 +85,20 @@ bool Item_row::fix_fields(THD *thd, Item **ref)
}
+void Item_row::cleanup()
+{
+ DBUG_ENTER("Item_row::cleanup");
+
+ Item::cleanup();
+ /* Reset to the original values */
+ used_tables_cache= 0;
+ const_item_cache= 1;
+ with_null= 0;
+
+ DBUG_VOID_RETURN;
+}
+
+
void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array,
List<Item> &fields)
{
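
The new Item_row::cleanup() above exists because a row item caches per-execution state (used_tables_cache, const_item_cache, with_null) that must not leak from one execution of a prepared statement into the next. A minimal sketch of the pattern, on a hypothetical class rather than the real Item hierarchy:

  // Minimal sketch (hypothetical class): per-execution caches are reset to
  // their constructor defaults so a re-executed prepared statement derives
  // them again from the freshly fixed fields.
  struct Row_item_sketch
  {
    unsigned long used_tables_cache = 0;
    bool const_item_cache = true;
    bool with_null = false;

    void cleanup()                      // called between executions
    {
      used_tables_cache = 0;
      const_item_cache = true;
      with_null = false;
    }
  };
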
diff --git a/sql/item_row.h b/sql/item_row.h
index 6fbe7436b72..d6dd4371372 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -19,7 +19,6 @@ class Item_row: public Item
Item **items;
table_map used_tables_cache;
uint arg_count;
- bool array_holder;
bool const_item_cache;
bool with_null;
public:
@@ -29,7 +28,6 @@ public:
items(item->items),
used_tables_cache(item->used_tables_cache),
arg_count(item->arg_count),
- array_holder(0),
const_item_cache(item->const_item_cache),
with_null(0)
{}
@@ -62,6 +60,7 @@ public:
return 0;
};
bool fix_fields(THD *thd, Item **ref);
+ void cleanup();
void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields);
table_map used_tables() const { return used_tables_cache; };
bool const_item() const { return const_item_cache; };
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index c471b11fee2..5773f0476a9 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -1296,6 +1296,7 @@ extern ulong slave_net_timeout, slave_trans_retries;
extern uint max_user_connections;
extern ulong what_to_log,flush_time;
extern ulong query_buff_size, thread_stack;
+extern ulong max_prepared_stmt_count, prepared_stmt_count;
extern ulong binlog_cache_size, max_binlog_cache_size, open_files_limit;
extern ulong max_binlog_size, max_relay_log_size;
#ifdef HAVE_ROW_BASED_REPLICATION
@@ -1350,6 +1351,7 @@ extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, LOCK_lock_db,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_read_lock,
LOCK_global_system_variables, LOCK_user_conn,
+ LOCK_prepared_stmt_count,
LOCK_bytes_sent, LOCK_bytes_received;
#ifdef HAVE_OPENSSL
extern pthread_mutex_t LOCK_des_key_file;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 541cc13aaea..73bd0d57eb3 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -500,6 +500,22 @@ ulong specialflag=0;
ulong binlog_cache_use= 0, binlog_cache_disk_use= 0;
ulong max_connections, max_connect_errors;
uint max_user_connections= 0;
+/*
+ Limit of the total number of prepared statements in the server.
+ Is necessary to protect the server against out-of-memory attacks.
+*/
+ulong max_prepared_stmt_count;
+/*
+ Current total number of prepared statements in the server. This number
+ is exact, and therefore may not be equal to the difference between
+ `com_stmt_prepare' and `com_stmt_close' (global status variables), as
+ the latter ones account for all registered attempts to prepare
+ a statement (including unsuccessful ones). Prepared statements are
+ currently connection-local: if the same SQL query text is prepared in
+ two different connections, this counts as two distinct prepared
+ statements.
+*/
+ulong prepared_stmt_count=0;
ulong thread_id=1L,current_pid;
ulong slow_launch_threads = 0, sync_binlog_period;
ulong expire_logs_days = 0;
@@ -577,6 +593,14 @@ pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received,
LOCK_global_system_variables,
LOCK_user_conn, LOCK_slave_list, LOCK_active_mi;
+/*
+ The below lock protects access to two global server variables:
+ max_prepared_stmt_count and prepared_stmt_count. These variables
+ set the limit and hold the current total number of prepared statements
+ in the server, respectively. As PREPARE/DEALLOCATE rate in a loaded
+ server may be fairly high, we need a dedicated lock.
+*/
+pthread_mutex_t LOCK_prepared_stmt_count;
#ifdef HAVE_OPENSSL
pthread_mutex_t LOCK_des_key_file;
#endif
@@ -1288,6 +1312,7 @@ static void clean_up_mutexes()
(void) pthread_mutex_destroy(&LOCK_global_system_variables);
(void) pthread_mutex_destroy(&LOCK_global_read_lock);
(void) pthread_mutex_destroy(&LOCK_uuid_generator);
+ (void) pthread_mutex_destroy(&LOCK_prepared_stmt_count);
(void) pthread_cond_destroy(&COND_thread_count);
(void) pthread_cond_destroy(&COND_refresh);
(void) pthread_cond_destroy(&COND_thread_cache);
@@ -2810,6 +2835,7 @@ static int init_thread_environment()
(void) pthread_mutex_init(&LOCK_active_mi, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_global_read_lock, MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_prepared_stmt_count, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_uuid_generator, MY_MUTEX_INIT_FAST);
#ifdef HAVE_OPENSSL
(void) pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST);
@@ -4634,7 +4660,8 @@ enum options_mysqld
OPT_MAX_BINLOG_CACHE_SIZE, OPT_MAX_BINLOG_SIZE,
OPT_MAX_CONNECTIONS, OPT_MAX_CONNECT_ERRORS,
OPT_MAX_DELAYED_THREADS, OPT_MAX_HEP_TABLE_SIZE,
- OPT_MAX_JOIN_SIZE, OPT_MAX_RELAY_LOG_SIZE, OPT_MAX_SORT_LENGTH,
+ OPT_MAX_JOIN_SIZE, OPT_MAX_PREPARED_STMT_COUNT,
+ OPT_MAX_RELAY_LOG_SIZE, OPT_MAX_SORT_LENGTH,
OPT_MAX_SEEKS_FOR_KEY, OPT_MAX_TMP_TABLES, OPT_MAX_USER_CONNECTIONS,
OPT_MAX_LENGTH_FOR_SORT_DATA,
OPT_MAX_WRITE_LOCK_COUNT, OPT_BULK_INSERT_BUFFER_SIZE,
@@ -5890,6 +5917,10 @@ The minimum value for this variable is 4096.",
(gptr*) &global_system_variables.max_length_for_sort_data,
(gptr*) &max_system_variables.max_length_for_sort_data, 0, GET_ULONG,
REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0},
+ {"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT,
+   "Maximum number of prepared statements in the server.",
+ (gptr*) &max_prepared_stmt_count, (gptr*) &max_prepared_stmt_count,
+ 0, GET_ULONG, REQUIRED_ARG, 16382, 0, 1*1024*1024, 0, 1, 0},
{"max_relay_log_size", OPT_MAX_RELAY_LOG_SIZE,
"If non-zero: relay log will be rotated automatically when the size exceeds this value; if zero (the default): when the size exceeds max_binlog_size. 0 excepted, the minimum value for this variable is 4096.",
(gptr*) &max_relay_log_size, (gptr*) &max_relay_log_size, 0, GET_ULONG,
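
The two comment blocks above introduce max_prepared_stmt_count and prepared_stmt_count together with a dedicated mutex, so that the hot PREPARE/DEALLOCATE path does not contend on LOCK_global_system_variables. A standalone sketch of that counter-plus-limit pattern, using pthreads and hypothetical names (the 16382 default is taken from the option definition above):

  // Standalone sketch (hypothetical names): the counter and its limit share
  // one dedicated mutex; registration fails once the limit is reached.
  #include <pthread.h>

  static pthread_mutex_t LOCK_counter = PTHREAD_MUTEX_INITIALIZER;
  static unsigned long stmt_count = 0;
  static unsigned long max_stmt_count = 16382;

  // Returns true if a new statement may be registered.
  bool try_register_statement()
  {
    pthread_mutex_lock(&LOCK_counter);
    bool ok = stmt_count < max_stmt_count;
    if (ok)
      stmt_count++;                     // count it while still holding the lock
    pthread_mutex_unlock(&LOCK_counter);
    return ok;
  }

  void unregister_statement()
  {
    pthread_mutex_lock(&LOCK_counter);
    if (stmt_count > 0)
      stmt_count--;
    pthread_mutex_unlock(&LOCK_counter);
  }
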
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 42f723eb382..3fddd780171 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -4370,7 +4370,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
sizeof(ROR_SCAN_INFO*)*
best_num)))
DBUG_RETURN(NULL);
- memcpy(trp->first_scan, ror_scan_mark, best_num*sizeof(ROR_SCAN_INFO*));
+ memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(ROR_SCAN_INFO*));
trp->last_scan= trp->first_scan + best_num;
trp->is_covering= TRUE;
trp->read_cost= total_cost;
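
The one-line opt_range.cc fix above changes the source of the memcpy(): the scans chosen for the covering intersection appear to occupy the prefix of tree->ror_scans after the greedy selection loop, while ror_scan_mark has been advanced past them, so copying from the mark would pick up the rejected tail. A generic sketch of that off-by-base mistake:

  // Generic sketch: a selection loop leaves the chosen entries at the front
  // of the array and advances a mark pointer past them; the result copy must
  // start from the array base, not from the advanced mark.
  #include <cassert>
  #include <cstring>

  int main()
  {
    int candidates[6] = {1, 2, 3, 4, 5, 6};
    const size_t best_num = 3;          // suppose the first 3 were selected
    int *mark = candidates + best_num;  // mark now points past the selection

    int chosen[3];
    memcpy(chosen, candidates, best_num * sizeof(int));  // correct: the prefix
    // memcpy(chosen, mark, ...) would copy the rejected tail instead.
    assert(chosen[0] == 1 && chosen[2] == 3);
    (void) mark;
    return 0;
  }
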
diff --git a/sql/set_var.cc b/sql/set_var.cc
index f2694f651f4..59e0c7b6ff7 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -161,6 +161,7 @@ static KEY_CACHE *create_key_cache(const char *name, uint length);
void fix_sql_mode_var(THD *thd, enum_var_type type);
static byte *get_error_count(THD *thd);
static byte *get_warning_count(THD *thd);
+static byte *get_prepared_stmt_count(THD *thd);
/*
Variable definition list
@@ -311,6 +312,10 @@ sys_var_thd_ha_rows sys_sql_max_join_size("sql_max_join_size",
&SV::max_join_size,
fix_max_join_size);
#endif
+static sys_var_long_ptr_global
+sys_max_prepared_stmt_count("max_prepared_stmt_count",
+ &max_prepared_stmt_count,
+ &LOCK_prepared_stmt_count);
sys_var_long_ptr sys_max_relay_log_size("max_relay_log_size",
&max_relay_log_size,
fix_max_relay_log_size);
@@ -604,6 +609,9 @@ static sys_var_readonly sys_warning_count("warning_count",
OPT_SESSION,
SHOW_LONG,
get_warning_count);
+static sys_var_readonly sys_prepared_stmt_count("prepared_stmt_count",
+ OPT_GLOBAL, SHOW_LONG,
+ get_prepared_stmt_count);
/* alias for last_insert_id() to be compatible with Sybase */
#ifdef HAVE_REPLICATION
@@ -847,6 +855,8 @@ SHOW_VAR init_vars[]= {
{sys_max_join_size.name, (char*) &sys_max_join_size, SHOW_SYS},
{sys_max_length_for_sort_data.name, (char*) &sys_max_length_for_sort_data,
SHOW_SYS},
+ {sys_max_prepared_stmt_count.name, (char*) &sys_max_prepared_stmt_count,
+ SHOW_SYS},
{sys_max_relay_log_size.name, (char*) &sys_max_relay_log_size, SHOW_SYS},
{sys_max_seeks_for_key.name, (char*) &sys_max_seeks_for_key, SHOW_SYS},
{sys_max_sort_length.name, (char*) &sys_max_sort_length, SHOW_SYS},
@@ -900,6 +910,7 @@ SHOW_VAR init_vars[]= {
SHOW_SYS},
{"pid_file", (char*) pidfile_name, SHOW_CHAR},
{"plugin_dir", (char*) opt_plugin_dir, SHOW_CHAR},
+ {sys_prepared_stmt_count.name, (char*) &sys_prepared_stmt_count, SHOW_SYS},
{"port", (char*) &mysqld_port, SHOW_INT},
{sys_preload_buff_size.name, (char*) &sys_preload_buff_size, SHOW_SYS},
{"protocol_version", (char*) &protocol_version, SHOW_INT},
@@ -1367,29 +1378,40 @@ static void fix_server_id(THD *thd, enum_var_type type)
server_id_supplied = 1;
}
-bool sys_var_long_ptr::check(THD *thd, set_var *var)
+
+sys_var_long_ptr::
+sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
+ sys_after_update_func after_update_arg)
+ :sys_var_long_ptr_global(name_arg, value_ptr,
+ &LOCK_global_system_variables, after_update_arg)
+{}
+
+
+bool sys_var_long_ptr_global::check(THD *thd, set_var *var)
{
longlong v= var->value->val_int();
var->save_result.ulonglong_value= v < 0 ? 0 : v;
return 0;
}
-bool sys_var_long_ptr::update(THD *thd, set_var *var)
+bool sys_var_long_ptr_global::update(THD *thd, set_var *var)
{
ulonglong tmp= var->save_result.ulonglong_value;
- pthread_mutex_lock(&LOCK_global_system_variables);
+ pthread_mutex_lock(guard);
if (option_limits)
*value= (ulong) getopt_ull_limit_value(tmp, option_limits);
else
*value= (ulong) tmp;
- pthread_mutex_unlock(&LOCK_global_system_variables);
+ pthread_mutex_unlock(guard);
return 0;
}
-void sys_var_long_ptr::set_default(THD *thd, enum_var_type type)
+void sys_var_long_ptr_global::set_default(THD *thd, enum_var_type type)
{
+ pthread_mutex_lock(guard);
*value= (ulong) option_limits->def_value;
+ pthread_mutex_unlock(guard);
}
@@ -2824,6 +2846,13 @@ static byte *get_error_count(THD *thd)
return (byte*) &thd->sys_var_tmp.long_value;
}
+static byte *get_prepared_stmt_count(THD *thd)
+{
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ thd->sys_var_tmp.ulong_value= prepared_stmt_count;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+ return (byte*) &thd->sys_var_tmp.ulong_value;
+}
/****************************************************************************
Main handling of variables:
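
The new get_prepared_stmt_count() above follows the same convention as get_warning_count() and get_error_count(): the shared value is snapshotted under its mutex into thd->sys_var_tmp and a pointer to that per-connection copy is returned, so the caller never reads the global without the lock. A sketch of the pattern with hypothetical names:

  // Sketch (hypothetical names): copy the shared counter into per-session
  // storage while holding its mutex, then return a pointer to the private
  // copy for later use by the SHOW machinery.
  #include <pthread.h>

  static pthread_mutex_t LOCK_counter = PTHREAD_MUTEX_INITIALIZER;
  static unsigned long shared_counter = 0;

  struct Session { unsigned long tmp_ulong; };  // stand-in for thd->sys_var_tmp

  unsigned char *get_counter(Session *session)
  {
    pthread_mutex_lock(&LOCK_counter);
    session->tmp_ulong = shared_counter;        // snapshot while protected
    pthread_mutex_unlock(&LOCK_counter);
    return (unsigned char *) &session->tmp_ulong;
  }
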
diff --git a/sql/set_var.h b/sql/set_var.h
index f62d6ce8d2a..8076f10bb0a 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -48,11 +48,7 @@ public:
sys_after_update_func after_update;
bool no_support_one_shot;
- sys_var(const char *name_arg)
- :name(name_arg), after_update(0)
- , no_support_one_shot(1)
- { add_sys_var(); }
- sys_var(const char *name_arg,sys_after_update_func func)
+ sys_var(const char *name_arg,sys_after_update_func func= NULL)
:name(name_arg), after_update(func)
, no_support_one_shot(1)
{ add_sys_var(); }
@@ -83,15 +79,35 @@ public:
};
-class sys_var_long_ptr :public sys_var
+/*
+ A base class for all variables that require its access to
+ be guarded with a mutex.
+*/
+
+class sys_var_global: public sys_var
+{
+protected:
+ pthread_mutex_t *guard;
+public:
+ sys_var_global(const char *name_arg, sys_after_update_func after_update_arg,
+ pthread_mutex_t *guard_arg)
+ :sys_var(name_arg, after_update_arg), guard(guard_arg) {}
+};
+
+
+/*
+ A global-only ulong variable that requires its access to be
+ protected with a mutex.
+*/
+
+class sys_var_long_ptr_global: public sys_var_global
{
public:
ulong *value;
- sys_var_long_ptr(const char *name_arg, ulong *value_ptr)
- :sys_var(name_arg),value(value_ptr) {}
- sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
- sys_after_update_func func)
- :sys_var(name_arg,func), value(value_ptr) {}
+ sys_var_long_ptr_global(const char *name_arg, ulong *value_ptr,
+ pthread_mutex_t *guard_arg,
+ sys_after_update_func after_update_arg= NULL)
+ :sys_var_global(name_arg, after_update_arg, guard_arg), value(value_ptr) {}
bool check(THD *thd, set_var *var);
bool update(THD *thd, set_var *var);
void set_default(THD *thd, enum_var_type type);
@@ -101,6 +117,18 @@ public:
};
+/*
+ A global ulong variable that is protected by LOCK_global_system_variables
+*/
+
+class sys_var_long_ptr :public sys_var_long_ptr_global
+{
+public:
+ sys_var_long_ptr(const char *name_arg, ulong *value_ptr,
+ sys_after_update_func after_update_arg= NULL);
+};
+
+
class sys_var_ulonglong_ptr :public sys_var
{
public:
@@ -179,7 +207,7 @@ class sys_var_const_str :public sys_var
public:
char *value; // Pointer to const value
sys_var_const_str(const char *name_arg, const char *value_arg)
- :sys_var(name_arg), value((char*) value_arg)
+ :sys_var(name_arg),value((char*) value_arg)
{}
bool check(THD *thd, set_var *var)
{
@@ -226,10 +254,7 @@ public:
class sys_var_thd :public sys_var
{
public:
- sys_var_thd(const char *name_arg)
- :sys_var(name_arg)
- {}
- sys_var_thd(const char *name_arg, sys_after_update_func func)
+ sys_var_thd(const char *name_arg, sys_after_update_func func= NULL)
:sys_var(name_arg,func)
{}
bool check_type(enum_var_type type) { return 0; }
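
The class split above introduces sys_var_global as a base that carries a guard mutex, sys_var_long_ptr_global as a guarded global ulong, and keeps sys_var_long_ptr as the variant whose guard defaults to LOCK_global_system_variables. A condensed standalone sketch of that hierarchy (hypothetical names, with LOCK_globals standing in for LOCK_global_system_variables):

  // Condensed sketch of the hierarchy: a base holding the guard mutex, a
  // guarded ulong variable, and a convenience subclass with a default guard.
  #include <pthread.h>

  static pthread_mutex_t LOCK_globals = PTHREAD_MUTEX_INITIALIZER; // stand-in

  class guarded_var
  {
  protected:
    pthread_mutex_t *guard;
  public:
    explicit guarded_var(pthread_mutex_t *g) : guard(g) {}
  };

  class guarded_ulong_var : public guarded_var
  {
  public:
    unsigned long *value;
    guarded_ulong_var(unsigned long *v, pthread_mutex_t *g)
      : guarded_var(g), value(v) {}
    void update(unsigned long new_value)
    {
      pthread_mutex_lock(guard);        // every write goes through the guard
      *value = new_value;
      pthread_mutex_unlock(guard);
    }
  };

  // Default-guarded variant, mirroring sys_var_long_ptr.
  class default_guarded_ulong_var : public guarded_ulong_var
  {
  public:
    explicit default_guarded_ulong_var(unsigned long *v)
      : guarded_ulong_var(v, &LOCK_globals) {}
  };
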
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 5702464a80d..d836bd4ff6c 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -5832,3 +5832,5 @@ ER_NULL_IN_VALUES_LESS_THAN
ER_WRONG_PARTITION_NAME
eng "Incorrect partition name"
swe "Felaktigt partitionsnamn"
+ER_MAX_PREPARED_STMT_COUNT_REACHED 42000
+ eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)"
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 123152c95ec..63d3b053529 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -447,7 +447,7 @@ THD::~THD()
net_end(&net);
}
#endif
- stmt_map.destroy(); /* close all prepared statements */
+ stmt_map.reset(); /* close all prepared statements */
DBUG_ASSERT(lock_info.n_cursors == 0);
if (!cleanup_done)
cleanup();
@@ -1769,21 +1769,72 @@ Statement_map::Statement_map() :
}
-int Statement_map::insert(Statement *statement)
+/*
+ Insert a new statement to the thread-local statement map.
+
+ DESCRIPTION
+ If there was an old statement with the same name, replace it with the
+ new one. Otherwise, check if max_prepared_stmt_count is not reached yet,
+ increase prepared_stmt_count, and insert the new statement. It's okay
+ to delete an old statement and fail to insert the new one.
+
+ POSTCONDITIONS
+ All named prepared statements are also present in names_hash.
+ Statement names in names_hash are unique.
+    The statement is added only if prepared_stmt_count < max_prepared_stmt_count
+ last_found_statement always points to a valid statement or is 0
+
+ RETURN VALUE
+ 0 success
+ 1 error: out of resources or max_prepared_stmt_count limit has been
+ reached. An error is sent to the client, the statement is deleted.
+*/
+
+int Statement_map::insert(THD *thd, Statement *statement)
{
- int res= my_hash_insert(&st_hash, (byte *) statement);
- if (res)
- return res;
- if (statement->name.str)
+ if (my_hash_insert(&st_hash, (byte*) statement))
{
- if ((res= my_hash_insert(&names_hash, (byte*)statement)))
- {
- hash_delete(&st_hash, (byte*)statement);
- return res;
- }
+ /*
+ Delete is needed only in case of an insert failure. In all other
+ cases hash_delete will also delete the statement.
+ */
+ delete statement;
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto err_st_hash;
+ }
+ if (statement->name.str && my_hash_insert(&names_hash, (byte*) statement))
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ goto err_names_hash;
}
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ /*
+ We don't check that prepared_stmt_count is <= max_prepared_stmt_count
+    because we want to allow lowering the total limit
+ of prepared statements below the current count. In that case
+ no new statements can be added until prepared_stmt_count drops below
+ the limit.
+ */
+ if (prepared_stmt_count >= max_prepared_stmt_count)
+ {
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+ my_error(ER_MAX_PREPARED_STMT_COUNT_REACHED, MYF(0),
+ max_prepared_stmt_count);
+ goto err_max;
+ }
+ prepared_stmt_count++;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+
last_found_statement= statement;
- return res;
+ return 0;
+
+err_max:
+ if (statement->name.str)
+ hash_delete(&names_hash, (byte*) statement);
+err_names_hash:
+ hash_delete(&st_hash, (byte*) statement);
+err_st_hash:
+ return 1;
}
@@ -1797,6 +1848,47 @@ void Statement_map::close_transient_cursors()
}
+void Statement_map::erase(Statement *statement)
+{
+ if (statement == last_found_statement)
+ last_found_statement= 0;
+ if (statement->name.str)
+ hash_delete(&names_hash, (byte *) statement);
+
+ hash_delete(&st_hash, (byte *) statement);
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ DBUG_ASSERT(prepared_stmt_count > 0);
+ prepared_stmt_count--;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+}
+
+
+void Statement_map::reset()
+{
+  /* Must be first, my_hash_reset() will reset st_hash.records */
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
+ prepared_stmt_count-= st_hash.records;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+
+ my_hash_reset(&names_hash);
+ my_hash_reset(&st_hash);
+ last_found_statement= 0;
+}
+
+
+Statement_map::~Statement_map()
+{
+ /* Must go first, hash_free will reset st_hash.records */
+ pthread_mutex_lock(&LOCK_prepared_stmt_count);
+ DBUG_ASSERT(prepared_stmt_count >= st_hash.records);
+ prepared_stmt_count-= st_hash.records;
+ pthread_mutex_unlock(&LOCK_prepared_stmt_count);
+
+ hash_free(&names_hash);
+ hash_free(&st_hash);
+}
+
bool select_dumpvar::send_data(List<Item> &items)
{
List_iterator_fast<Item_func_set_user_var> li(vars);
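
Statement_map::insert() above registers a statement in up to three places (st_hash, names_hash, the global count) and unwinds in reverse order through the err_max/err_names_hash/err_st_hash labels when a later step fails. A standalone sketch of that unwind shape, using a hypothetical registry built on std::set instead of the server's HASH:

  // Sketch (hypothetical registry, std::set instead of HASH) of the unwind
  // pattern: each successful step has a matching cleanup label, so a late
  // failure rolls back the earlier insertions before reporting the error.
  #include <set>
  #include <string>

  struct Registry
  {
    std::set<std::string> by_id, by_name;
    unsigned long count = 0, max_count = 4;

    int insert(const std::string &id, const std::string &name)
    {
      if (!by_id.insert(id).second)
        goto err_id;                    // nothing registered yet, just report
      if (!name.empty() && !by_name.insert(name).second)
        goto err_name;                  // undo the id registration
      if (count >= max_count)
        goto err_max;                   // undo both registrations
      count++;
      return 0;

    err_max:
      if (!name.empty())
        by_name.erase(name);
    err_name:
      by_id.erase(id);
    err_id:
      return 1;
    }
  };
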
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 60ff558ac48..53712aaf69e 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -545,7 +545,7 @@ class Statement_map
public:
Statement_map();
- int insert(Statement *statement);
+ int insert(THD *thd, Statement *statement);
Statement *find_by_name(LEX_STRING *name)
{
@@ -567,36 +567,16 @@ public:
}
return last_found_statement;
}
- void erase(Statement *statement)
- {
- if (statement == last_found_statement)
- last_found_statement= 0;
- if (statement->name.str)
- {
- hash_delete(&names_hash, (byte *) statement);
- }
- hash_delete(&st_hash, (byte *) statement);
- }
/*
Close all cursors of this connection that use tables of a storage
engine that has transaction-specific state and therefore can not
survive COMMIT or ROLLBACK. Currently all but MyISAM cursors are closed.
*/
void close_transient_cursors();
+ void erase(Statement *statement);
/* Erase all statements (calls Statement destructor) */
- void reset()
- {
- my_hash_reset(&names_hash);
- my_hash_reset(&st_hash);
- transient_cursor_list.empty();
- last_found_statement= 0;
- }
-
- void destroy()
- {
- hash_free(&names_hash);
- hash_free(&st_hash);
- }
+ void reset();
+ ~Statement_map();
private:
HASH st_hash;
HASH names_hash;
@@ -1179,6 +1159,7 @@ public:
{
my_bool my_bool_value;
long long_value;
+ ulong ulong_value;
} sys_var_tmp;
struct {
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index ced15b3f728..e0cf9095a22 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1848,10 +1848,13 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length)
if (! (stmt= new Prepared_statement(thd, &thd->protocol_prep)))
DBUG_VOID_RETURN; /* out of memory: error is set in Sql_alloc */
- if (thd->stmt_map.insert(stmt))
+ if (thd->stmt_map.insert(thd, stmt))
{
- delete stmt;
- DBUG_VOID_RETURN; /* out of memory */
+ /*
+ The error is set in the insert. The statement itself
+      will also be deleted there (this is how the hash works).
+ */
+ DBUG_VOID_RETURN;
}
/* Reset warnings from previous command */
@@ -2028,11 +2031,17 @@ void mysql_sql_stmt_prepare(THD *thd)
DBUG_VOID_RETURN; /* out of memory */
}
- if (stmt->set_name(name) || thd->stmt_map.insert(stmt))
+ /* Set the name first, insert should know that this statement has a name */
+ if (stmt->set_name(name))
{
delete stmt;
DBUG_VOID_RETURN;
}
+ if (thd->stmt_map.insert(thd, stmt))
+ {
+ /* The statement is deleted and an error is set if insert fails */
+ DBUG_VOID_RETURN;
+ }
if (stmt->prepare(query, query_len+1))
{
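
The sql_prepare.cc hunks adjust the callers to the new ownership contract: when thd->stmt_map.insert() fails it has already sent the error and destroyed the statement, so the caller only returns. A caller-side sketch with hypothetical types:

  // Caller-side sketch (hypothetical types): on failure the map has already
  // reported the error and freed the statement, so the caller must neither
  // delete it again nor send a second error.
  #include <vector>

  struct Stmt {};

  struct Map
  {
    std::vector<Stmt*> stmts;

    // Returns 1 on failure and takes ownership of stmt's destruction there.
    int insert(Stmt *stmt, bool fail)
    {
      if (fail)
      {
        delete stmt;                    // the map, not the caller, frees it
        return 1;
      }
      stmts.push_back(stmt);            // on success the map owns the statement
      return 0;
    }

    ~Map() { for (Stmt *s : stmts) delete s; }
  };

  void prepare_sketch(Map &map, bool fail)
  {
    Stmt *stmt = new Stmt;
    if (map.insert(stmt, fail))
      return;                           // error already sent, stmt already freed
    // ... name, parse and prepare the statement here ...
  }
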