author    unknown <monty@mysql.com>    2004-11-08 17:53:32 +0200
committer unknown <monty@mysql.com>    2004-11-08 17:53:32 +0200
commit    47a5ac0e30599ea287fd206d4d21b7c304fd8c80 (patch)
tree      8b26ea79c2cf98b50f42ec02a9c92e62c98f6276 /sql
parent    87abc13d6fcca99cccfdc76782cbbd0b880fa9bf (diff)
parent    8793e2197aeab3a2d434a180f804f295de3a3cce (diff)
download  mariadb-git-47a5ac0e30599ea287fd206d4d21b7c304fd8c80.tar.gz
Merge with 4.1 to get new thd->mem_root handling
BitKeeper/etc/ignore: auto-union
client/mysqldump.c: Auto merged
client/mysqltest.c: Auto merged
innobase/include/row0mysql.h: Auto merged
libmysql/libmysql.c: Auto merged
mysql-test/r/ctype_ucs.result: Auto merged
mysql-test/r/fulltext.result: Auto merged
mysql-test/r/func_in.result: Auto merged
mysql-test/r/ps.result: Auto merged
mysql-test/r/ps_2myisam.result: Auto merged
mysql-test/r/ps_3innodb.result: Auto merged
mysql-test/r/ps_4heap.result: Auto merged
mysql-test/r/ps_5merge.result: Auto merged
mysql-test/r/ps_6bdb.result: Auto merged
mysql-test/r/type_float.result: Auto merged
mysql-test/r/user_var.result: Auto merged
mysql-test/t/innodb.test: Auto merged
mysql-test/t/user_var.test: Auto merged
mysql-test/t/variables.test: Auto merged
sql/ha_berkeley.cc: Auto merged
sql/ha_innodb.cc: Auto merged
sql/ha_innodb.h: Auto merged
sql/ha_ndbcluster.h: Auto merged
sql/item.cc: Auto merged
sql/item_cmpfunc.cc: Auto merged
sql/item_func.cc: Auto merged
sql/item_sum.cc: Auto merged
sql/item_timefunc.cc: Auto merged
sql/log_event.cc: Auto merged
sql/mysql_priv.h: Auto merged
sql/mysqld.cc: Auto merged
sql/repl_failsafe.cc: Auto merged
sql/set_var.cc: Auto merged
sql/sql_acl.cc: Auto merged
sql/sql_db.cc: Auto merged
sql/sql_error.cc: Auto merged
sql/sql_help.cc: Auto merged
sql/sql_insert.cc: Auto merged
sql/sql_prepare.cc: Auto merged
sql/sql_select.h: Auto merged
sql/sql_yacc.yy: Auto merged
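The core of this merge is that THR_MALLOC no longer stores a MEM_ROOT* directly: it now holds a MEM_ROOT** that points at THD::mem_root (an Item_arena member backed by main_mem_root), so switching the active allocation root becomes a plain pointer assignment on the THD instead of a thread-local update. Below is a minimal stand-alone sketch of that idea with simplified stand-in types; THD_like, the helper names and main() are illustrative only, not the server's API.

```cpp
// Minimal sketch (simplified stand-in types, not the real server code) of the
// old THR_MALLOC swap versus the new thd->mem_root pointer handling.
#include <cstdio>

struct MEM_ROOT { const char *name; };            // stand-in for mysys MEM_ROOT

// --- old scheme: the thread-local slot holds a MEM_ROOT* directly ----------
static thread_local MEM_ROOT *THR_MALLOC_old= nullptr;

void old_style_swap(MEM_ROOT *tmp_root)
{
  MEM_ROOT *old_root= THR_MALLOC_old;             // save
  THR_MALLOC_old= tmp_root;                       // every swap touches TLS
  /* ... allocate through THR_MALLOC_old ... */
  THR_MALLOC_old= old_root;                       // restore
}

// --- new scheme: TLS holds a MEM_ROOT** that points at thd->mem_root -------
struct THD_like
{
  MEM_ROOT main_mem_root{"main"};
  MEM_ROOT *mem_root= &main_mem_root;             // current root, a plain pointer
};
static thread_local MEM_ROOT **THR_MALLOC_new= nullptr;

void new_style_swap(THD_like *thd, MEM_ROOT *tmp_root)
{
  MEM_ROOT *old_root= thd->mem_root;              // save
  thd->mem_root= tmp_root;                        // cheap pointer assignment,
  /* ... allocate through thd->mem_root ... */    // the TLS slot stays untouched
  thd->mem_root= old_root;                        // restore
}

int main()
{
  THD_like thd;
  THR_MALLOC_new= &thd.mem_root;                  // set up once per thread
  MEM_ROOT tmp{"tmp"};
  old_style_swap(&tmp);
  new_style_swap(&thd, &tmp);
  std::printf("current root: %s\n", (*THR_MALLOC_new)->name);
  return 0;
}
```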
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_berkeley.cc    |  10
-rw-r--r--  sql/ha_innodb.cc      |  30
-rw-r--r--  sql/ha_innodb.h       |   4
-rw-r--r--  sql/ha_ndbcluster.cc  | 153
-rw-r--r--  sql/ha_ndbcluster.h   |  23
-rw-r--r--  sql/item.cc           |   4
-rw-r--r--  sql/item_cmpfunc.cc   |  11
-rw-r--r--  sql/item_func.cc      |  28
-rw-r--r--  sql/item_subselect.cc |  35
-rw-r--r--  sql/item_sum.cc       |  30
-rw-r--r--  sql/item_timefunc.cc  |   6
-rw-r--r--  sql/log_event.cc      |   8
-rw-r--r--  sql/mysql_priv.h      |   2
-rw-r--r--  sql/mysqld.cc         |   2
-rw-r--r--  sql/opt_range.cc      |  38
-rw-r--r--  sql/repl_failsafe.cc  |   2
-rw-r--r--  sql/set_var.cc        |   2
-rw-r--r--  sql/sql_acl.cc        |  10
-rw-r--r--  sql/sql_base.cc       |  22
-rw-r--r--  sql/sql_class.cc      |  18
-rw-r--r--  sql/sql_class.h       |  32
-rw-r--r--  sql/sql_db.cc         |   2
-rw-r--r--  sql/sql_error.cc      |   6
-rw-r--r--  sql/sql_help.cc       |  14
-rw-r--r--  sql/sql_insert.cc     |   4
-rw-r--r--  sql/sql_parse.cc      |  20
-rw-r--r--  sql/sql_prepare.cc    |   6
-rw-r--r--  sql/sql_select.cc     |  19
-rw-r--r--  sql/sql_select.h      |   2
-rw-r--r--  sql/sql_union.cc      |  18
-rw-r--r--  sql/sql_yacc.yy       |  16
-rw-r--r--  sql/table.cc          |  14
-rw-r--r--  sql/thr_malloc.cc     |   2
33 files changed, 333 insertions(+), 260 deletions(-)
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index a5d0023b875..ff6b10fe504 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -234,13 +234,13 @@ int berkeley_show_logs(Protocol *protocol)
{
char **all_logs, **free_logs, **a, **f;
int error=1;
- MEM_ROOT show_logs_root;
- MEM_ROOT *old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC);
+ MEM_ROOT **root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**,THR_MALLOC);
+ MEM_ROOT show_logs_root, *old_mem_root= *root_ptr;
DBUG_ENTER("berkeley_show_logs");
init_sql_alloc(&show_logs_root, BDB_LOG_ALLOC_BLOCK_SIZE,
BDB_LOG_ALLOC_BLOCK_SIZE);
- my_pthread_setspecific_ptr(THR_MALLOC,&show_logs_root);
+ *root_ptr= &show_logs_root;
if ((error= db_env->log_archive(db_env, &all_logs,
DB_ARCH_ABS | DB_ARCH_LOG)) ||
@@ -277,15 +277,17 @@ int berkeley_show_logs(Protocol *protocol)
}
err:
free_root(&show_logs_root,MYF(0));
- my_pthread_setspecific_ptr(THR_MALLOC,old_root);
+ *root_ptr= old_mem_root;
DBUG_RETURN(error);
}
+
static void berkeley_print_error(const char *db_errpfx, char *buffer)
{
sql_print_error("%s: %s",db_errpfx,buffer); /* purecov: tested */
}
+
static void berkeley_noticecall(DB_ENV *db_env, db_notices notice)
{
switch (notice)
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index efd74a543c2..2a77111e95b 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -2318,6 +2318,34 @@ ha_innobase::write_row(
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
+ if (user_thd->lex->sql_command == SQLCOM_ALTER_TABLE
+ && num_write_row >= 10000) {
+ /* ALTER TABLE is COMMITted at every 10000 copied rows.
+ The IX table lock for the original table has to be re-issued.
+ As this method will be called on a temporary table where the
+ contents of the original table is being copied to, it is
+ a bit tricky to determine the source table. The cursor
+ position in the source table need not be adjusted after the
+ intermediate COMMIT, since writes by other transactions are
+ being blocked by a MySQL table lock TL_WRITE_ALLOW_READ. */
+ ut_a(prebuilt->trx->mysql_n_tables_locked == 2);
+ ut_a(UT_LIST_GET_LEN(prebuilt->trx->trx_locks) >= 2);
+ dict_table_t* table = lock_get_ix_table(
+ UT_LIST_GET_FIRST(prebuilt->trx->trx_locks));
+ num_write_row = 0;
+ /* Commit the transaction. This will release the table
+ locks, so they have to be acquired again. */
+ innobase_commit(user_thd, prebuilt->trx);
+ /* Note that this transaction is still active. */
+ user_thd->transaction.all.innodb_active_trans = 1;
+ /* Re-acquire the IX table lock on the source table. */
+ row_lock_table_for_mysql(prebuilt, table);
+ /* We will need an IX lock on the destination table. */
+ prebuilt->sql_stat_start = TRUE;
+ }
+
+ num_write_row++;
+
if (last_query_id != user_thd->query_id) {
prebuilt->sql_stat_start = TRUE;
last_query_id = user_thd->query_id;
@@ -4913,7 +4941,7 @@ ha_innobase::external_lock(
if (thd->in_lock_tables &&
thd->variables.innodb_table_locks) {
ulint error;
- error = row_lock_table_for_mysql(prebuilt);
+ error = row_lock_table_for_mysql(prebuilt, 0);
if (error != DB_SUCCESS) {
error = convert_error_code_to_mysql(
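The new ha_innobase::write_row() logic above commits the ALTER TABLE transaction at every 10000 copied rows and then re-acquires the IX table lock on the source table before continuing the copy. The toy sketch below shows only that pattern; Txn, commit_txn and relock_source are hypothetical stand-ins, not InnoDB's API.

```cpp
// Illustrative sketch of "commit every N copied rows, then re-lock the source":
// all types and helpers here are simplified stand-ins.
#include <cstdio>

struct Txn { int rows_since_commit= 0; bool table_locked= false; };

static void commit_txn(Txn &t)    { t.rows_since_commit= 0; t.table_locked= false; }
static void relock_source(Txn &t) { t.table_locked= true; }

// Called once per row copied into the new (temporary) table.
static void write_row(Txn &t, int batch_size= 10000)
{
  if (t.rows_since_commit >= batch_size)
  {
    commit_txn(t);        // releases all table locks held by the transaction
    relock_source(t);     // so the IX lock on the source must be re-acquired
  }                       // (the destination re-locks lazily on the next write)
  ++t.rows_since_commit;
  /* ... insert the row into the destination table ... */
}

int main()
{
  Txn txn;
  relock_source(txn);
  for (int i= 0; i < 25000; i++)
    write_row(txn);
  std::printf("rows copied since last intermediate commit: %d\n",
              txn.rows_since_commit);
  return 0;
}
```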
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 57e136a8fba..9c9b6990fc3 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -64,6 +64,7 @@ class ha_innobase: public handler
uint last_match_mode;/* match mode of the latest search:
ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX,
or undefined */
+ uint num_write_row; /* number of write_row() calls */
longlong auto_inc_counter_for_this_stat;
ulong max_supported_row_length(const byte *buf);
@@ -85,7 +86,8 @@ class ha_innobase: public handler
HA_PRIMARY_KEY_IN_READ_INDEX |
HA_TABLE_SCAN_ON_INDEX),
last_dup_key((uint) -1),
- start_of_scan(0)
+ start_of_scan(0),
+ num_write_row(0)
{
}
~ha_innobase() {}
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index ebbbaf8acc8..b2e115e9779 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -324,7 +324,7 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
DBUG_PRINT("info", ("transformed ndbcluster error %d to mysql error %d",
err.code, res));
if (res == HA_ERR_FOUND_DUPP_KEY)
- dupkey= table->primary_key;
+ m_dupkey= table->primary_key;
DBUG_RETURN(res);
}
@@ -551,7 +551,7 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
blob_size+= 8 - blob_size % 8;
if (loop == 1)
{
- char *buf= blobs_buffer + offset;
+ char *buf= m_blobs_buffer + offset;
uint32 len= 0xffffffff; // Max uint32
DBUG_PRINT("value", ("read blob ptr=%x len=%u",
(uint)buf, (uint)blob_len));
@@ -563,15 +563,15 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
offset+= blob_size;
}
}
- if (loop == 0 && offset > blobs_buffer_size)
+ if (loop == 0 && offset > m_blobs_buffer_size)
{
- my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
- blobs_buffer_size= 0;
+ my_free(m_blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
+ m_blobs_buffer_size= 0;
DBUG_PRINT("value", ("allocate blobs buffer size %u", offset));
- blobs_buffer= my_malloc(offset, MYF(MY_WME));
- if (blobs_buffer == NULL)
+ m_blobs_buffer= my_malloc(offset, MYF(MY_WME));
+ if (m_blobs_buffer == NULL)
DBUG_RETURN(-1);
- blobs_buffer_size= offset;
+ m_blobs_buffer_size= offset;
}
}
DBUG_RETURN(0);
@@ -854,7 +854,7 @@ int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type)
{
if (type >= TL_WRITE_ALLOW_WRITE)
return NdbOperation::LM_Exclusive;
- else if (uses_blob_value(retrieve_all_fields))
+ else if (uses_blob_value(m_retrieve_all_fields))
return NdbOperation::LM_Read;
else
return NdbOperation::LM_CommittedRead;
@@ -1018,7 +1018,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
{
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
- retrieve_all_fields)
+ m_retrieve_all_fields)
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(trans->getNdbError());
@@ -1055,7 +1055,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
THD *thd= current_thd;
DBUG_ENTER("complemented_pk_read");
- if (retrieve_all_fields)
+ if (m_retrieve_all_fields)
// We have allready retrieved all fields, nothing to complement
DBUG_RETURN(0);
@@ -1192,12 +1192,12 @@ inline int ha_ndbcluster::next_result(byte *buf)
/*
We can only handle one tuple with blobs at a time.
*/
- if (ops_pending && blobs_pending)
+ if (m_ops_pending && m_blobs_pending)
{
if (execute_no_commit(this,trans) != 0)
DBUG_RETURN(ndb_err(trans));
- ops_pending= 0;
- blobs_pending= FALSE;
+ m_ops_pending= 0;
+ m_blobs_pending= FALSE;
}
check= cursor->nextResult(contact_ndb);
if (check == 0)
@@ -1219,8 +1219,8 @@ inline int ha_ndbcluster::next_result(byte *buf)
all pending update or delete operations should
be sent to NDB
*/
- DBUG_PRINT("info", ("ops_pending: %d", ops_pending));
- if (ops_pending)
+ DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending));
+ if (m_ops_pending)
{
if (current_thd->transaction.on)
{
@@ -1234,7 +1234,7 @@ inline int ha_ndbcluster::next_result(byte *buf)
int res= trans->restart();
DBUG_ASSERT(res == 0);
}
- ops_pending= 0;
+ m_ops_pending= 0;
}
contact_ndb= (check == 2);
@@ -1423,7 +1423,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
Field *field= table->field[i];
if ((thd->query_id == field->query_id) ||
(field->flags & PRI_KEY_FLAG) ||
- retrieve_all_fields)
+ m_retrieve_all_fields)
{
if (get_ndb_value(op, field, i, buf))
ERR_RETURN(op->getNdbError());
@@ -1668,9 +1668,9 @@ int ha_ndbcluster::write_row(byte *record)
if (has_auto_increment)
{
- skip_auto_increment= FALSE;
+ m_skip_auto_increment= FALSE;
update_auto_increment();
- skip_auto_increment= !auto_increment_column_changed;
+ m_skip_auto_increment= !auto_increment_column_changed;
}
if ((res= set_primary_key(op)))
@@ -1685,7 +1685,7 @@ int ha_ndbcluster::write_row(byte *record)
if (!(field->flags & PRI_KEY_FLAG) &&
set_ndb_value(op, field, i, &set_blob_value))
{
- skip_auto_increment= TRUE;
+ m_skip_auto_increment= TRUE;
ERR_RETURN(op->getNdbError());
}
}
@@ -1697,25 +1697,25 @@ int ha_ndbcluster::write_row(byte *record)
to NoCommit the transaction between each row.
Find out how this is detected!
*/
- rows_inserted++;
+ m_rows_inserted++;
no_uncommitted_rows_update(1);
- bulk_insert_not_flushed= TRUE;
- if ((rows_to_insert == 1) ||
- ((rows_inserted % bulk_insert_rows) == 0) ||
+ m_bulk_insert_not_flushed= TRUE;
+ if ((m_rows_to_insert == 1) ||
+ ((m_rows_inserted % m_bulk_insert_rows) == 0) ||
set_blob_value)
{
THD *thd= current_thd;
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
"rows_inserted:%d, bulk_insert_rows: %d",
- (int)rows_inserted, (int)bulk_insert_rows));
+ (int)m_rows_inserted, (int)m_bulk_insert_rows));
- bulk_insert_not_flushed= FALSE;
+ m_bulk_insert_not_flushed= FALSE;
if (thd->transaction.on)
{
if (execute_no_commit(this,trans) != 0)
{
- skip_auto_increment= TRUE;
+ m_skip_auto_increment= TRUE;
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@@ -1724,7 +1724,7 @@ int ha_ndbcluster::write_row(byte *record)
{
if (execute_commit(this,trans) != 0)
{
- skip_auto_increment= TRUE;
+ m_skip_auto_increment= TRUE;
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@@ -1732,7 +1732,7 @@ int ha_ndbcluster::write_row(byte *record)
DBUG_ASSERT(res == 0);
}
}
- if ((has_auto_increment) && (skip_auto_increment))
+ if ((has_auto_increment) && (m_skip_auto_increment))
{
Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
DBUG_PRINT("info",
@@ -1742,7 +1742,7 @@ int ha_ndbcluster::write_row(byte *record)
DBUG_PRINT("info",
("Setting next auto increment value to %u", next_val));
}
- skip_auto_increment= TRUE;
+ m_skip_auto_increment= TRUE;
DBUG_RETURN(0);
}
@@ -1822,7 +1822,9 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
}
// Delete old row
DBUG_PRINT("info", ("insert succeded"));
+ m_primary_key_update= TRUE;
delete_res= delete_row(old_data);
+ m_primary_key_update= FALSE;
if (delete_res)
{
DBUG_PRINT("info", ("delete failed"));
@@ -1845,9 +1847,9 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
DBUG_PRINT("info", ("Calling updateTuple on cursor"));
if (!(op= cursor->updateTuple()))
ERR_RETURN(trans->getNdbError());
- ops_pending++;
+ m_ops_pending++;
if (uses_blob_value(FALSE))
- blobs_pending= TRUE;
+ m_blobs_pending= TRUE;
}
else
{
@@ -1924,7 +1926,7 @@ int ha_ndbcluster::delete_row(const byte *record)
DBUG_PRINT("info", ("Calling deleteTuple on cursor"));
if (cursor->deleteTuple() != 0)
ERR_RETURN(trans->getNdbError());
- ops_pending++;
+ m_ops_pending++;
no_uncommitted_rows_update(-1);
@@ -1954,8 +1956,10 @@ int ha_ndbcluster::delete_row(const byte *record)
else
{
int res;
- if ((res= set_primary_key(op)))
- return res;
+ if ((res= (m_primary_key_update ?
+ set_primary_key_from_old_data(op, record)
+ : set_primary_key(op))))
+ return res;
}
}
@@ -2417,18 +2421,18 @@ int ha_ndbcluster::close_scan()
DBUG_RETURN(1);
- if (ops_pending)
+ if (m_ops_pending)
{
/*
Take over any pending transactions to the
deleteing/updating transaction before closing the scan
*/
- DBUG_PRINT("info", ("ops_pending: %d", ops_pending));
+ DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending));
if (execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
- ops_pending= 0;
+ m_ops_pending= 0;
}
cursor->close();
@@ -2563,7 +2567,7 @@ void ha_ndbcluster::info(uint flag)
if (flag & HA_STATUS_ERRKEY)
{
DBUG_PRINT("info", ("HA_STATUS_ERRKEY"));
- errkey= dupkey;
+ errkey= m_dupkey;
}
if (flag & HA_STATUS_AUTO)
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
@@ -2672,7 +2676,7 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
where field->query_id is the same as
the current query id */
DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_ALL_COLS"));
- retrieve_all_fields= TRUE;
+ m_retrieve_all_fields= TRUE;
break;
case HA_EXTRA_PREPARE_FOR_DELETE:
DBUG_PRINT("info", ("HA_EXTRA_PREPARE_FOR_DELETE"));
@@ -2718,8 +2722,8 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
DBUG_ENTER("start_bulk_insert");
DBUG_PRINT("enter", ("rows: %d", (int)rows));
- rows_inserted= 0;
- rows_to_insert= rows;
+ m_rows_inserted= 0;
+ m_rows_to_insert= rows;
/*
Calculate how many rows that should be inserted
@@ -2733,7 +2737,7 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
batch= bytesperbatch/bytes;
batch= batch == 0 ? 1 : batch;
DBUG_PRINT("info", ("batch: %d, bytes: %d", batch, bytes));
- bulk_insert_rows= batch;
+ m_bulk_insert_rows= batch;
DBUG_VOID_RETURN;
}
@@ -2747,22 +2751,22 @@ int ha_ndbcluster::end_bulk_insert()
DBUG_ENTER("end_bulk_insert");
// Check if last inserts need to be flushed
- if (bulk_insert_not_flushed)
+ if (m_bulk_insert_not_flushed)
{
NdbConnection *trans= m_active_trans;
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
"rows_inserted:%d, bulk_insert_rows: %d",
- rows_inserted, bulk_insert_rows));
- bulk_insert_not_flushed= FALSE;
+ m_rows_inserted, m_bulk_insert_rows));
+ m_bulk_insert_not_flushed= FALSE;
if (execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure();
my_errno= error= ndb_err(trans);
}
}
- rows_inserted= 0;
- rows_to_insert= 1;
+ m_rows_inserted= 0;
+ m_rows_to_insert= 1;
DBUG_RETURN(error);
}
@@ -2948,8 +2952,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
(NdbConnection*)thd->transaction.stmt.ndb_tid;
DBUG_ASSERT(m_active_trans);
// Start of transaction
- retrieve_all_fields= FALSE;
- ops_pending= 0;
+ m_retrieve_all_fields= FALSE;
+ m_ops_pending= 0;
{
NDBDICT *dict= m_ndb->getDictionary();
const NDBTAB *tab;
@@ -2997,13 +3001,13 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
DBUG_PRINT("warning", ("m_active_cursor != NULL"));
m_active_cursor= NULL;
- if (blobs_pending)
+ if (m_blobs_pending)
DBUG_PRINT("warning", ("blobs_pending != 0"));
- blobs_pending= 0;
+ m_blobs_pending= 0;
- if (ops_pending)
+ if (m_ops_pending)
DBUG_PRINT("warning", ("ops_pending != 0L"));
- ops_pending= 0;
+ m_ops_pending= 0;
}
DBUG_RETURN(error);
}
@@ -3040,8 +3044,8 @@ int ha_ndbcluster::start_stmt(THD *thd)
m_active_trans= trans;
// Start of statement
- retrieve_all_fields= FALSE;
- ops_pending= 0;
+ m_retrieve_all_fields= FALSE;
+ m_ops_pending= 0;
DBUG_RETURN(error);
}
@@ -3583,10 +3587,10 @@ ulonglong ha_ndbcluster::get_auto_increment()
Uint64 auto_value;
DBUG_ENTER("get_auto_increment");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
- cache_size= ((rows_to_insert - rows_inserted < autoincrement_prefetch) ?
- rows_to_insert - rows_inserted :
- max(rows_to_insert, autoincrement_prefetch));
- auto_value= ((skip_auto_increment) ?
+ cache_size= ((m_rows_to_insert - m_rows_inserted < autoincrement_prefetch) ?
+ m_rows_to_insert - m_rows_inserted :
+ max(m_rows_to_insert, autoincrement_prefetch));
+ auto_value= ((m_skip_auto_increment) ?
m_ndb->readAutoIncrementValue((const NDBTAB *) m_table) :
m_ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size));
DBUG_RETURN((ulonglong) auto_value);
@@ -3611,17 +3615,18 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
m_share(0),
m_use_write(FALSE),
m_ignore_dup_key_not_supported(FALSE),
- retrieve_all_fields(FALSE),
- rows_to_insert(1),
- rows_inserted(0),
- bulk_insert_rows(1024),
- bulk_insert_not_flushed(FALSE),
- ops_pending(0),
- skip_auto_increment(TRUE),
- blobs_pending(0),
- blobs_buffer(0),
- blobs_buffer_size(0),
- dupkey((uint) -1)
+ m_primary_key_update(FALSE),
+ m_retrieve_all_fields(FALSE),
+ m_rows_to_insert(1),
+ m_rows_inserted(0),
+ m_bulk_insert_rows(1024),
+ m_bulk_insert_not_flushed(FALSE),
+ m_ops_pending(0),
+ m_skip_auto_increment(TRUE),
+ m_blobs_pending(0),
+ m_blobs_buffer(0),
+ m_blobs_buffer_size(0),
+ m_dupkey((uint) -1)
{
int i;
@@ -3655,8 +3660,8 @@ ha_ndbcluster::~ha_ndbcluster()
if (m_share)
free_share(m_share);
release_metadata();
- my_free(blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
- blobs_buffer= 0;
+ my_free(m_blobs_buffer, MYF(MY_ALLOW_ZERO_PTR));
+ m_blobs_buffer= 0;
// Check for open cursor/transaction
if (m_active_cursor) {
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index fc1e4390adb..d61876b1357 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -243,18 +243,19 @@ class ha_ndbcluster: public handler
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
bool m_use_write;
bool m_ignore_dup_key_not_supported;
- bool retrieve_all_fields;
- ha_rows rows_to_insert;
- ha_rows rows_inserted;
- ha_rows bulk_insert_rows;
- bool bulk_insert_not_flushed;
- ha_rows ops_pending;
- bool skip_auto_increment;
- bool blobs_pending;
+ bool m_primary_key_update;
+ bool m_retrieve_all_fields;
+ ha_rows m_rows_to_insert;
+ ha_rows m_rows_inserted;
+ ha_rows m_bulk_insert_rows;
+ bool m_bulk_insert_not_flushed;
+ ha_rows m_ops_pending;
+ bool m_skip_auto_increment;
+ bool m_blobs_pending;
// memory for blobs in one tuple
- char *blobs_buffer;
- uint32 blobs_buffer_size;
- uint dupkey;
+ char *m_blobs_buffer;
+ uint32 m_blobs_buffer_size;
+ uint m_dupkey;
void set_rec_per_key();
void records_update();
diff --git a/sql/item.cc b/sql/item.cc
index 14ba097cf6d..063f3ab1f53 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1370,6 +1370,10 @@ bool Item_param::convert_str_value(THD *thd)
value.cs_info.character_set_client,
value.cs_info.final_character_set_of_str_value);
}
+ else
+ str_value.set_charset(value.cs_info.final_character_set_of_str_value);
+ /* Here str_value is guaranteed to be in final_character_set_of_str_value */
+
max_length= str_value.length();
decimals= 0;
/*
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index b08a6fcd2e6..905250ed96f 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -1771,12 +1771,13 @@ void Item_func_in::fix_length_and_dec()
Conversion is possible:
All IN arguments are constants.
*/
- Item_arena *arena= thd->current_arena, backup;
- if (arena->is_stmt_prepare())
- thd->set_n_backup_item_arena(arena, &backup);
+ Item_arena *arena, backup;
+ arena= thd->change_arena_if_needed(&backup);
+
for (arg= args+1, arg_end= args+arg_count; arg < arg_end; arg++)
{
- if (!my_charset_same(cmp_collation.collation,
+ if (!arg[0]->null_value &&
+ !my_charset_same(cmp_collation.collation,
arg[0]->collation.collation))
{
Item_string *conv;
@@ -1790,7 +1791,7 @@ void Item_func_in::fix_length_and_dec()
arg[0]= conv;
}
}
- if (arena->is_stmt_prepare())
+ if (arena)
thd->restore_backup_item_arena(arena, &backup);
}
}
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 583371aed32..82912f8a0fa 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -163,14 +163,13 @@ bool Item_func::agg_arg_charsets(DTCollation &coll,
}
THD *thd= current_thd;
- Item_arena *arena= thd->current_arena, backup;
+ Item_arena *arena, backup;
bool res= FALSE;
/*
In case we're in statement prepare, create conversion item
in its memory: it will be reused on each execute.
*/
- if (arena->is_stmt_prepare())
- thd->set_n_backup_item_arena(arena, &backup);
+ arena= thd->change_arena_if_needed(&backup);
for (arg= args, last= args + nargs; arg < last; arg++)
{
@@ -198,7 +197,7 @@ bool Item_func::agg_arg_charsets(DTCollation &coll,
conv->fix_fields(thd, 0, &conv);
*arg= conv;
}
- if (arena->is_stmt_prepare())
+ if (arena)
thd->restore_backup_item_arena(arena, &backup);
return res;
}
@@ -2429,6 +2428,7 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
entry->value=0;
entry->length=0;
entry->update_query_id=0;
+ entry->collation.set(NULL, DERIVATION_NONE);
/*
If we are here, we were called from a SET or a query which sets a
variable. Imagine it is this:
@@ -2470,7 +2470,24 @@ bool Item_func_set_user_var::fix_fields(THD *thd, TABLE_LIST *tables,
is different from query_id).
*/
entry->update_query_id= thd->query_id;
- entry->collation.set(args[0]->collation);
+ /*
+ As it is wrong and confusing to associate any
+ character set with NULL, @a should be latin2
+ after this query sequence:
+
+ SET @a=_latin2'string';
+ SET @a=NULL;
+
+ I.e. the second query should not change the charset
+ to the current default value, but should keep the
+ original value assigned during the first query.
+ In order to do it, we don't copy charset
+ from the argument if the argument is NULL
+ and the variable has previously been initialized.
+ */
+ if (!entry->collation.collation || !args[0]->null_value)
+ entry->collation.set(args[0]->collation);
+ collation.set(entry->collation);
cached_result_type= args[0]->result_type();
return 0;
}
@@ -2498,7 +2515,6 @@ bool Item_func_set_user_var::update_hash(void *ptr, uint length,
my_free(entry->value,MYF(0));
entry->value=0;
entry->length=0;
- entry->collation.set(cs, dv);
}
else
{
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index bafca8acf0f..f426f14a25f 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -190,15 +190,16 @@ bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref)
bool Item_subselect::exec()
{
int res;
- MEM_ROOT *old_root= my_pthread_getspecific_ptr(MEM_ROOT*, THR_MALLOC);
- if (&thd->mem_root != old_root)
- {
- my_pthread_setspecific_ptr(THR_MALLOC, &thd->mem_root);
- res= engine->exec();
- my_pthread_setspecific_ptr(THR_MALLOC, old_root);
- }
- else
- res= engine->exec();
+ MEM_ROOT *old_root= thd->mem_root;
+
+ /*
+ As this is execution, all objects should be allocated through the main
+ mem root
+ */
+ thd->mem_root= &thd->main_mem_root;
+ res= engine->exec();
+ thd->mem_root= old_root;
+
if (engine_changed)
{
engine_changed= 0;
@@ -661,15 +662,10 @@ Item_in_subselect::single_value_transformer(JOIN *join,
}
SELECT_LEX *select_lex= join->select_lex;
- Item_arena *arena= thd->current_arena, backup;
-
+ Item_arena *arena, backup;
+ arena= thd->change_arena_if_needed(&backup);
thd->where= "scalar IN/ALL/ANY subquery";
- if (arena->is_conventional())
- arena= 0; // For easier test
- else
- thd->set_n_backup_item_arena(arena, &backup);
-
/*
Check that the right part of the subselect contains no more than one
column. E.g. in SELECT 1 IN (SELECT * ..) the right part is (SELECT * ...)
@@ -928,11 +924,8 @@ Item_in_subselect::row_value_transformer(JOIN *join)
}
thd->where= "row IN/ALL/ANY subquery";
- Item_arena *arena= thd->current_arena, backup;
- if (arena->is_conventional())
- arena= 0;
- else
- thd->set_n_backup_item_arena(arena, &backup);
+ Item_arena *arena, backup;
+ arena= thd->change_arena_if_needed(&backup);
if (select_lex->item_list.elements != left_expr->cols())
{
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 743b3eee58c..28d77cdf8b5 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -255,7 +255,7 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
Item *Item_sum_sum::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_sum(thd, this);
+ return new (thd->mem_root) Item_sum_sum(thd, this);
}
@@ -399,7 +399,7 @@ double Item_sum_sum_distinct::val()
Item *Item_sum_count::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_count(thd, this);
+ return new (thd->mem_root) Item_sum_count(thd, this);
}
@@ -444,7 +444,7 @@ void Item_sum_count::cleanup()
Item *Item_sum_avg::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_avg(thd, this);
+ return new (thd->mem_root) Item_sum_avg(thd, this);
}
@@ -491,7 +491,7 @@ double Item_sum_std::val()
Item *Item_sum_std::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_std(thd, this);
+ return new (thd->mem_root) Item_sum_std(thd, this);
}
@@ -501,7 +501,7 @@ Item *Item_sum_std::copy_or_same(THD* thd)
Item *Item_sum_variance::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_variance(thd, this);
+ return new (thd->mem_root) Item_sum_variance(thd, this);
}
@@ -663,7 +663,7 @@ void Item_sum_hybrid::cleanup()
Item *Item_sum_min::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_min(thd, this);
+ return new (thd->mem_root) Item_sum_min(thd, this);
}
@@ -716,7 +716,7 @@ bool Item_sum_min::add()
Item *Item_sum_max::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_max(thd, this);
+ return new (thd->mem_root) Item_sum_max(thd, this);
}
@@ -783,7 +783,7 @@ void Item_sum_bit::clear()
Item *Item_sum_or::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_or(thd, this);
+ return new (thd->mem_root) Item_sum_or(thd, this);
}
@@ -797,7 +797,7 @@ bool Item_sum_or::add()
Item *Item_sum_xor::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_xor(thd, this);
+ return new (thd->mem_root) Item_sum_xor(thd, this);
}
@@ -811,7 +811,7 @@ bool Item_sum_xor::add()
Item *Item_sum_and::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_and(thd, this);
+ return new (thd->mem_root) Item_sum_and(thd, this);
}
@@ -1449,7 +1449,7 @@ int Item_sum_count_distinct::tree_to_myisam()
Item *Item_sum_count_distinct::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_count_distinct(thd, this);
+ return new (thd->mem_root) Item_sum_count_distinct(thd, this);
}
@@ -1550,7 +1550,7 @@ bool Item_udf_sum::add()
Item *Item_sum_udf_float::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_udf_float(thd, this);
+ return new (thd->mem_root) Item_sum_udf_float(thd, this);
}
double Item_sum_udf_float::val()
@@ -1575,7 +1575,7 @@ String *Item_sum_udf_float::val_str(String *str)
Item *Item_sum_udf_int::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_udf_int(thd, this);
+ return new (thd->mem_root) Item_sum_udf_int(thd, this);
}
@@ -1613,7 +1613,7 @@ void Item_sum_udf_str::fix_length_and_dec()
Item *Item_sum_udf_str::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_sum_udf_str(thd, this);
+ return new (thd->mem_root) Item_sum_udf_str(thd, this);
}
@@ -1927,7 +1927,7 @@ Item_func_group_concat::~Item_func_group_concat()
Item *Item_func_group_concat::copy_or_same(THD* thd)
{
- return new (&thd->mem_root) Item_func_group_concat(thd, this);
+ return new (thd->mem_root) Item_func_group_concat(thd, this);
}
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index fd7a2ea2cb4..566cacca487 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -164,10 +164,10 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
CHARSET_INFO *cs= &my_charset_bin;
int error= 0;
bool usa_time= 0;
- bool sunday_first_n_first_week_non_iso;
+ bool sunday_first_n_first_week_non_iso= -2;
bool strict_week_number;
int strict_week_number_year= -1;
- bool strict_week_number_year_type;
+ bool strict_week_number_year_type= -1;
int frac_part;
const char *val_begin= val;
const char *val_end= val + length;
@@ -175,9 +175,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
const char *end= ptr + format->format.length;
DBUG_ENTER("extract_date_time");
- LINT_INIT(sunday_first_n_first_week_non_iso);
LINT_INIT(strict_week_number);
- LINT_INIT(strict_week_number_year_type);
if (!sub_pattern_end)
bzero((char*) l_time, sizeof(*l_time));
diff --git a/sql/log_event.cc b/sql/log_event.cc
index ee5af6ed1c0..c7f6f25e74a 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1535,7 +1535,7 @@ end:
thd->query_length= thd->db_length =0;
VOID(pthread_mutex_unlock(&LOCK_thread_count));
close_thread_tables(thd);
- free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC));
+ free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
/*
If there was an error we stop. Otherwise we increment positions. Note that
we will not increment group* positions if we are just after a SET
@@ -2614,10 +2614,10 @@ Slave: load data infile on table '%s' at log position %s in log \
slave_print_error(rli,sql_errno,"\
Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
err, (char*)table_name, print_slave_db_safe(db));
- free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC));
+ free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
return 1;
}
- free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC));
+ free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
if (thd->is_fatal_error)
{
@@ -3250,7 +3250,7 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli)
*/
e.fix_fields(thd, 0, 0);
e.update_hash(val, val_len, type, charset, DERIVATION_NONE);
- free_root(&thd->mem_root,0);
+ free_root(thd->mem_root,0);
rli->inc_event_relay_log_pos();
return 0;
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 960f054b3ac..0a6b499ea4f 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -994,7 +994,7 @@ extern char *default_tz_name;
extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log;
extern FILE *bootstrap_file;
-extern pthread_key(MEM_ROOT*,THR_MALLOC);
+extern pthread_key(MEM_ROOT**,THR_MALLOC);
extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open,
LOCK_thread_count,LOCK_mapped_file,LOCK_user_locks, LOCK_status,
LOCK_error_log, LOCK_delayed_insert, LOCK_uuid_generator,
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 6214481e273..196b7af46b1 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -375,7 +375,7 @@ SHOW_COMP_OPTION have_crypt, have_compress;
/* Thread specific variables */
-pthread_key(MEM_ROOT*,THR_MALLOC);
+pthread_key(MEM_ROOT**,THR_MALLOC);
pthread_key(THD*, THR_THD);
pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
LOCK_mapped_file, LOCK_status,
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index b832b33049d..997edbdf155 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -724,7 +724,7 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
{
// Allocates everything through the internal memroot
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
- my_pthread_setspecific_ptr(THR_MALLOC,&alloc);
+ thd->mem_root= &alloc;
}
else
bzero((char*) &alloc,sizeof(alloc));
@@ -1041,7 +1041,7 @@ QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(THD *thd_param,
rowid_length= table->file->ref_length;
record= head->record[0];
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
- my_pthread_setspecific_ptr(THR_MALLOC,&alloc);
+ thd_param->malloc= &alloc;
}
@@ -1476,7 +1476,8 @@ public:
KEY_PART_INFO *min_max_arg_part_arg,
uint group_prefix_len_arg, uint used_key_parts_arg,
uint group_key_parts_arg, KEY *index_info_arg,
- uint index_arg, uint key_infix_len_arg, byte *key_infix_arg,
+ uint index_arg, uint key_infix_len_arg,
+ byte *key_infix_arg,
SEL_TREE *tree_arg, SEL_ARG *index_tree_arg,
uint param_idx_arg, ha_rows quick_prefix_records_arg)
: have_min(have_min_arg), have_max(have_max_arg),
@@ -1486,12 +1487,10 @@ public:
index(index_arg), key_infix_len(key_infix_len_arg), range_tree(tree_arg),
index_tree(index_tree_arg), param_idx(param_idx_arg),
quick_prefix_records(quick_prefix_records_arg)
-{
- if (key_infix_len)
- memcpy(this->key_infix, key_infix_arg, key_infix_len);
-}
-
-
+ {
+ if (key_infix_len)
+ memcpy(this->key_infix, key_infix_arg, key_infix_len);
+ }
QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows,
MEM_ROOT *parent_alloc);
@@ -1649,8 +1648,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_RETURN(0); // Can't use range
}
key_parts= param.key_parts;
- old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC);
- my_pthread_setspecific_ptr(THR_MALLOC,&alloc);
+ old_root= thd->mem_root;
+ thd->mem_root= &alloc;
/*
Make an array with description of all key parts of all table keys.
@@ -1795,7 +1794,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
}
}
- my_pthread_setspecific_ptr(THR_MALLOC, old_root);
+ thd->mem_root= old_root;
/* If we got a read plan, create a quick select from it. */
if (best_trp)
@@ -1809,8 +1808,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
}
free_mem:
- my_pthread_setspecific_ptr(THR_MALLOC, old_root);
free_root(&alloc,MYF(0)); // Return memory & allocator
+ thd->mem_root= old_root;
thd->no_errors=0;
}
@@ -5229,7 +5228,7 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts)
NOTES
The caller must call QUICK_SELECT::init for returned quick select
- CAUTION! This function may change THR_MALLOC to a MEM_ROOT which will be
+ CAUTION! This function may change thd->mem_root to a MEM_ROOT which will be
deallocated when the returned quick select is deleted.
RETURN
@@ -5473,7 +5472,8 @@ bool QUICK_ROR_UNION_SELECT::check_if_keys_used(List<Item> *fields)
QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
TABLE_REF *ref)
{
- MEM_ROOT *old_root= my_pthread_getspecific_ptr(MEM_ROOT*, THR_MALLOC);
+ MEM_ROOT *old_root= thd->mem_root;
+ /* The following call may change thd->mem_root */
QUICK_RANGE_SELECT *quick= new QUICK_RANGE_SELECT(thd, table, ref->key, 0);
KEY *key_info = &table->key_info[ref->key];
KEY_PART *key_part;
@@ -5534,11 +5534,11 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
}
ok:
- my_pthread_setspecific_ptr(THR_MALLOC, old_root);
+ thd->mem_root= old_root;
return quick;
err:
- my_pthread_setspecific_ptr(THR_MALLOC, old_root);
+ thd->mem_root= old_root;
delete quick;
return 0;
}
@@ -7497,10 +7497,10 @@ QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg,
if (!parent_alloc)
{
init_sql_alloc(&alloc, join->thd->variables.range_alloc_block_size, 0);
- my_pthread_setspecific_ptr(THR_MALLOC,&alloc);
+ thd->mem_root= &alloc;
}
else
- bzero(&alloc, sizeof(MEM_ROOT));
+ bzero(&alloc, sizeof(MEM_ROOT)); // ensure that it's not used
}
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index dc23b014e31..253b2c96545 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -92,7 +92,7 @@ static int init_failsafe_rpl_thread(THD* thd)
VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals));
#endif
- thd->mem_root.free=thd->mem_root.used=0;
+ thd->mem_root->free= thd->mem_root->used= 0;
if (thd->variables.max_join_size == HA_POS_ERROR)
thd->options|= OPTION_BIG_SELECTS;
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 8ffed33d442..ccac3082d5b 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -1193,7 +1193,7 @@ static void fix_max_connections(THD *thd, enum_var_type type)
static void fix_thd_mem_root(THD *thd, enum_var_type type)
{
if (type != OPT_GLOBAL)
- reset_root_defaults(&thd->mem_root,
+ reset_root_defaults(thd->mem_root,
thd->variables.query_alloc_block_size,
thd->variables.query_prealloc_size);
}
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index bed3130462d..e7b3e13274f 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -2426,8 +2426,8 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
create_new_users= test_if_create_new_users(thd);
int result=0;
rw_wrlock(&LOCK_grant);
- MEM_ROOT *old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC);
- my_pthread_setspecific_ptr(THR_MALLOC,&memex);
+ MEM_ROOT *old_root= thd->mem_root;
+ thd->mem_root= &memex;
while ((Str = str_list++))
{
@@ -2535,7 +2535,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
}
}
grant_option=TRUE;
- my_pthread_setspecific_ptr(THR_MALLOC,old_root);
+ thd->mem_root= old_root;
rw_unlock(&LOCK_grant);
if (!result)
send_ok(thd);
@@ -2667,6 +2667,7 @@ my_bool grant_init(THD *org_thd)
THD *thd;
TABLE_LIST tables[2];
MYSQL_LOCK *lock;
+ MEM_ROOT *memex_ptr;
my_bool return_val= 1;
TABLE *t_table, *c_table;
bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE;
@@ -2714,7 +2715,8 @@ my_bool grant_init(THD *org_thd)
grant_option= TRUE;
/* Will be restored by org_thd->store_globals() */
- my_pthread_setspecific_ptr(THR_MALLOC,&memex);
+ memex_ptr= &memex;
+ my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr);
do
{
GRANT_TABLE *mem_check;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index b3be1fc7338..a1481bb2522 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -508,7 +508,7 @@ void close_temporary_tables(THD *thd)
*/
query_buf_size+= table->key_length+1;
- if ((query = alloc_root(&thd->mem_root, query_buf_size)))
+ if ((query = alloc_root(thd->mem_root, query_buf_size)))
// Better add "if exists", in case a RESET MASTER has been done
end=strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ");
@@ -2597,24 +2597,20 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields,
List<Item> *sum_func_list,
uint wild_num)
{
- Item *item;
- DBUG_ENTER("setup_wild");
-
if (!wild_num)
- DBUG_RETURN(0);
+ return(0);
- Item_arena *arena= thd->current_arena, backup;
+ Item *item;
+ List_iterator<Item> it(fields);
+ Item_arena *arena, backup;
+ DBUG_ENTER("setup_wild");
/*
Don't use arena if we are not in prepared statements or stored procedures
For PS/SP we have to use arena to remember the changes
*/
- if (arena->is_conventional())
- arena= 0; // For easier test later one
- else
- thd->set_n_backup_item_arena(arena, &backup);
+ arena= thd->change_arena_if_needed(&backup);
- List_iterator<Item> it(fields);
while (wild_num && (item= it++))
{
if (item->type() == Item::FIELD_ITEM &&
@@ -3108,7 +3104,7 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds)
}
if (arena)
- thd->set_n_backup_item_arena(arena, &backup);
+ arena= thd->change_arena_if_needed(&backup);
TABLE *t1=tab1->table;
TABLE *t2=tab2->table;
@@ -3209,7 +3205,7 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds)
embedding->nested_join->join_list.head() == embedded);
}
- if (arena)
+ if (!thd->current_arena->is_conventional())
{
/*
We are in prepared statement preparation code => we should store
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 5ed5468af2f..15e1cc7e212 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -316,7 +316,7 @@ void THD::init_for_queries()
{
ha_enable_transaction(this,TRUE);
- reset_root_defaults(&mem_root, variables.query_alloc_block_size,
+ reset_root_defaults(mem_root, variables.query_alloc_block_size,
variables.query_prealloc_size);
reset_root_defaults(&transaction.mem_root,
variables.trans_alloc_block_size,
@@ -431,7 +431,7 @@ THD::~THD()
dbug_sentry= THD_SENTRY_GONE;
#endif
/* Reset stmt_backup.mem_root to not double-free memory from thd.mem_root */
- clear_alloc_root(&stmt_backup.mem_root);
+ clear_alloc_root(&stmt_backup.main_mem_root);
DBUG_VOID_RETURN;
}
@@ -1465,10 +1465,10 @@ void select_dumpvar::cleanup()
for memory root initialization.
*/
Item_arena::Item_arena(THD* thd)
- :free_list(0),
- state(INITIALIZED)
+ :free_list(0), mem_root(&main_mem_root),
+ state(INITIALIZED)
{
- init_sql_alloc(&mem_root,
+ init_sql_alloc(&main_mem_root,
thd->variables.query_alloc_block_size,
thd->variables.query_prealloc_size);
}
@@ -1491,11 +1491,11 @@ Item_arena::Item_arena(THD* thd)
statements.
*/
Item_arena::Item_arena(bool init_mem_root)
- :free_list(0),
+ :free_list(0), mem_root(&main_mem_root),
state(CONVENTIONAL_EXECUTION)
{
if (init_mem_root)
- init_sql_alloc(&mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
+ init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
}
@@ -1626,14 +1626,14 @@ void Item_arena::restore_backup_item_arena(Item_arena *set, Item_arena *backup)
void Item_arena::set_item_arena(Item_arena *set)
{
- mem_root= set->mem_root;
+ mem_root= set->mem_root;
free_list= set->free_list;
state= set->state;
}
Statement::~Statement()
{
- free_root(&mem_root, MYF(0));
+ free_root(&main_mem_root, MYF(0));
}
C_MODE_START
diff --git a/sql/sql_class.h b/sql/sql_class.h
index fcaebe64b43..987e557c37c 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -515,7 +515,8 @@ public:
itself to the list on creation (see Item::Item() for details))
*/
Item *free_list;
- MEM_ROOT mem_root;
+ MEM_ROOT main_mem_root;
+ MEM_ROOT *mem_root; // Pointer to current memroot
#ifndef DBUG_OFF
bool backup_arena;
#endif
@@ -557,24 +558,24 @@ public:
{ return state == PREPARED || state == EXECUTED; }
inline bool is_conventional() const
{ return state == CONVENTIONAL_EXECUTION; }
- inline gptr alloc(unsigned int size) { return alloc_root(&mem_root,size); }
+ inline gptr alloc(unsigned int size) { return alloc_root(mem_root,size); }
inline gptr calloc(unsigned int size)
{
gptr ptr;
- if ((ptr=alloc_root(&mem_root,size)))
+ if ((ptr=alloc_root(mem_root,size)))
bzero((char*) ptr,size);
return ptr;
}
inline char *strdup(const char *str)
- { return strdup_root(&mem_root,str); }
+ { return strdup_root(mem_root,str); }
inline char *strmake(const char *str, uint size)
- { return strmake_root(&mem_root,str,size); }
+ { return strmake_root(mem_root,str,size); }
inline char *memdup(const char *str, uint size)
- { return memdup_root(&mem_root,str,size); }
+ { return memdup_root(mem_root,str,size); }
inline char *memdup_w_gap(const char *str, uint size, uint gap)
{
gptr ptr;
- if ((ptr=alloc_root(&mem_root,size+gap)))
+ if ((ptr=alloc_root(mem_root,size+gap)))
memcpy(ptr,str,size);
return ptr;
}
@@ -1167,11 +1168,26 @@ public:
inline CHARSET_INFO *charset() { return variables.character_set_client; }
void update_charset();
+ inline Item_arena *change_arena_if_needed(Item_arena *backup)
+ {
+ /*
+ use new arena if we are in a prepared statements and we have not
+ already changed to use this arena.
+ */
+ if (current_arena->is_stmt_prepare() &&
+ mem_root != &current_arena->main_mem_root)
+ {
+ set_n_backup_item_arena(current_arena, backup);
+ return current_arena;
+ }
+ return 0;
+ }
+
void change_item_tree(Item **place, Item *new_value)
{
/* TODO: check for OOM condition here */
if (!current_arena->is_conventional())
- nocheck_register_item_tree_change(place, *place, &mem_root);
+ nocheck_register_item_tree_change(place, *place, mem_root);
*place= new_value;
}
void nocheck_register_item_tree_change(Item **place, Item *old_value,
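Several hunks in this patch (item_cmpfunc.cc, item_func.cc, item_subselect.cc, sql_base.cc, sql_union.cc) are converted to the THD::change_arena_if_needed()/restore_backup_item_arena() calling convention declared above. The following is a toy sketch of that caller pattern with simplified stand-in classes; it omits the extra mem_root check the real helper performs and the real arena swapping.

```cpp
// Toy sketch (stand-in types, not the real THD/Item_arena) of the caller
// pattern: swap to the prepared-statement arena only while preparing, and
// restore only if a swap actually happened.
#include <cstdio>

struct Item_arena
{
  bool stmt_prepare= false;
  bool is_stmt_prepare() const { return stmt_prepare; }
};

struct THD
{
  Item_arena conventional_arena;
  Item_arena *current_arena= &conventional_arena;

  void set_n_backup_item_arena(Item_arena*, Item_arena*)
  { std::puts("switched to statement arena"); }     // real code swaps mem_root/free_list
  void restore_backup_item_arena(Item_arena*, Item_arena*)
  { std::puts("restored execution arena"); }

  Item_arena *change_arena_if_needed(Item_arena *backup)
  {
    if (current_arena->is_stmt_prepare())            // only during PREPARE
    {
      set_n_backup_item_arena(current_arena, backup);
      return current_arena;                          // non-zero => caller restores
    }
    return 0;                                        // conventional execution: no-op
  }
};

static void some_fix_length_and_dec(THD *thd)
{
  Item_arena *arena, backup;
  arena= thd->change_arena_if_needed(&backup);
  /* ... create conversion items; during PREPARE they now persist ... */
  if (arena)
    thd->restore_backup_item_arena(arena, &backup);
}

int main()
{
  THD thd;
  some_fix_length_and_dec(&thd);                     // normal query: nothing swapped
  thd.conventional_arena.stmt_prepare= true;         // pretend we are preparing a PS
  some_fix_length_and_dec(&thd);                     // arena swapped and restored
  return 0;
}
```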
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 982ad4a34a9..7a100d05b93 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -724,7 +724,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db,
if ((mysql_rm_known_files(thd, new_dirp, NullS, newpath,1)) < 0)
goto err;
if (!(copy_of_path= thd->memdup(newpath, length+1)) ||
- !(dir= new (&thd->mem_root) String(copy_of_path, length,
+ !(dir= new (thd->mem_root) String(copy_of_path, length,
&my_charset_bin)) ||
raid_dirs.push_back(dir))
goto err;
diff --git a/sql/sql_error.cc b/sql/sql_error.cc
index c09bfe0aea8..3fbefdd37ef 100644
--- a/sql/sql_error.cc
+++ b/sql/sql_error.cc
@@ -133,11 +133,11 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
The following code is here to change the allocation to not
use the thd->mem_root, which is freed after each query
*/
- MEM_ROOT *old_root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC);
- my_pthread_setspecific_ptr(THR_MALLOC, &thd->warn_root);
+ MEM_ROOT *old_root= thd->mem_root;
+ thd->mem_root= &thd->warn_root;
if ((err= new MYSQL_ERROR(thd, code, level, msg)))
thd->warn_list.push_back(err);
- my_pthread_setspecific_ptr(THR_MALLOC, old_root);
+ thd->mem_root= old_root;
}
thd->warn_count[(uint) level]++;
thd->total_warn_count++;
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index b349a09e49e..b3d7bebe96a 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -126,7 +126,7 @@ void memorize_variant_topic(THD *thd, TABLE *topics, int count,
String *name, String *description, String *example)
{
DBUG_ENTER("memorize_variant_topic");
- MEM_ROOT *mem_root= &thd->mem_root;
+ MEM_ROOT *mem_root= thd->mem_root;
if (count==0)
{
get_field(mem_root,find_fields[help_topic_name].field, name);
@@ -137,7 +137,7 @@ void memorize_variant_topic(THD *thd, TABLE *topics, int count,
{
if (count == 1)
names->push_back(name);
- String *new_name= new (&thd->mem_root) String;
+ String *new_name= new (thd->mem_root) String;
get_field(mem_root,find_fields[help_topic_name].field,new_name);
names->push_back(new_name);
}
@@ -350,8 +350,8 @@ int search_categories(THD *thd, TABLE *categories,
{
if (select && !select->cond->val_int())
continue;
- String *lname= new (&thd->mem_root) String;
- get_field(&thd->mem_root,pfname,lname);
+ String *lname= new (thd->mem_root) String;
+ get_field(thd->mem_root,pfname,lname);
if (++count == 1 && res_id)
*res_id= (int16) pcat_id->val_int();
names->push_back(lname);
@@ -384,8 +384,8 @@ void get_all_items_for_category(THD *thd, TABLE *items, Field *pfname,
{
if (!select->cond->val_int())
continue;
- String *name= new (&thd->mem_root) String();
- get_field(&thd->mem_root,pfname,name);
+ String *name= new (thd->mem_root) String();
+ get_field(thd->mem_root,pfname,name);
res->push_back(name);
}
end_read_record(&read_record_info);
@@ -638,7 +638,7 @@ int mysqld_help(THD *thd, const char *mask)
String name, description, example;
int res, count_topics, count_categories, error;
uint mlen= strlen(mask);
- MEM_ROOT *mem_root= &thd->mem_root;
+ MEM_ROOT *mem_root= thd->mem_root;
if ((res= open_and_lock_tables(thd, tables)))
goto end;
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 63e11822f6e..71b427da8de 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -211,7 +211,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list,
{
/* it should be allocated before Item::fix_fields() */
table->insert_values=
- (byte *)alloc_root(&thd->mem_root, table->rec_buff_length);
+ (byte *)alloc_root(thd->mem_root, table->rec_buff_length);
if (!table->insert_values)
goto abort;
}
@@ -1124,7 +1124,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
found_next_number_field=table->found_next_number_field;
for (org_field=table->field ; *org_field ; org_field++,field++)
{
- if (!(*field= (*org_field)->new_field(&client_thd->mem_root,copy)))
+ if (!(*field= (*org_field)->new_field(client_thd->mem_root,copy)))
return 0;
(*field)->orig_table= copy; // Remove connection
(*field)->move_field(adjust_ptrs); // Point at copy->record[0]
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 35bad134127..b293a4119ff 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1146,14 +1146,14 @@ extern "C" pthread_handler_decl(handle_bootstrap,arg)
{
thd->net.error = 0;
close_thread_tables(thd); // Free tables
- free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC));
+ free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
break;
}
mysql_parse(thd,thd->query,length);
close_thread_tables(thd); // Free tables
if (thd->is_fatal_error)
break;
- free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC));
+ free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
free_root(&thd->transaction.mem_root,MYF(MY_KEEP_PREALLOC));
}
@@ -1859,7 +1859,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
VOID(pthread_mutex_unlock(&LOCK_thread_count));
thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory
- free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC));
+ free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
DBUG_RETURN(error);
}
@@ -2144,7 +2144,7 @@ mysql_execute_command(THD *thd)
query_len= need_conversion? (pstr->length() * to_cs->mbmaxlen) :
pstr->length();
- if (!(query_str= alloc_root(&thd->mem_root, query_len+1)))
+ if (!(query_str= alloc_root(thd->mem_root, query_len+1)))
{
res= -1;
break; // EOM (error should be reported by allocator)
@@ -4490,8 +4490,8 @@ mysql_init_select(LEX *lex)
bool
mysql_new_select(LEX *lex, bool move_down)
{
- SELECT_LEX *select_lex = new(&lex->thd->mem_root) SELECT_LEX();
- if (!select_lex)
+ SELECT_LEX *select_lex;
+ if (!(select_lex= new(lex->thd->mem_root) SELECT_LEX()))
return 1;
select_lex->select_number= ++lex->thd->select_number;
select_lex->init_query();
@@ -4500,9 +4500,10 @@ mysql_new_select(LEX *lex, bool move_down)
if (move_down)
{
/* first select_lex of subselect or derived table */
- SELECT_LEX_UNIT *unit= new(&lex->thd->mem_root) SELECT_LEX_UNIT();
- if (!unit)
+ SELECT_LEX_UNIT *unit;
+ if (!(unit= new(lex->thd->mem_root) SELECT_LEX_UNIT()))
return 1;
+
unit->init_query();
unit->init_select();
unit->thd= lex->thd;
@@ -4529,7 +4530,8 @@ mysql_new_select(LEX *lex, bool move_down)
as far as we included SELECT_LEX for UNION unit should have
fake SELECT_LEX for UNION processing
*/
- fake= unit->fake_select_lex= new(&lex->thd->mem_root) SELECT_LEX();
+ if (!(fake= unit->fake_select_lex= new(lex->thd->mem_root) SELECT_LEX()))
+ return 1;
fake->include_standalone(unit,
(SELECT_LEX_NODE**)&unit->fake_select_lex);
fake->select_number= INT_MAX;
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index d908e63210a..4f94bfb6c05 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1070,7 +1070,7 @@ static int mysql_test_select(Prepared_statement *stmt,
DBUG_RETURN(1);
#endif
- if (!lex->result && !(lex->result= new (&stmt->mem_root) select_send))
+ if (!lex->result && !(lex->result= new (stmt->mem_root) select_send))
{
send_error(thd);
goto err;
@@ -1518,7 +1518,7 @@ static bool init_param_array(Prepared_statement *stmt)
List_iterator<Item_param> param_iterator(lex->param_list);
/* Use thd->mem_root as it points at statement mem_root */
stmt->param_array= (Item_param **)
- alloc_root(&stmt->thd->mem_root,
+ alloc_root(stmt->thd->mem_root,
sizeof(Item_param*) * stmt->param_count);
if (!stmt->param_array)
{
@@ -1584,7 +1584,7 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length,
if (name)
{
stmt->name.length= name->length;
- if (!(stmt->name.str= memdup_root(&stmt->mem_root, (char*)name->str,
+ if (!(stmt->name.str= memdup_root(stmt->mem_root, (char*)name->str,
name->length)))
{
delete stmt;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 53c66e9b192..d154e3e93c1 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -5293,8 +5293,17 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
/* Join with outer join condition */
COND *orig_cond=sel->cond;
sel->cond= and_conds(sel->cond, *tab->on_expr_ref);
+
+ /*
+ We can't call sel->cond->fix_fields,
+ as it will break tab->on_expr if it's AND condition
+ (fix_fields currently removes extra AND/OR levels).
+ Yet attributes of the just built condition are not needed.
+ Thus we call sel->cond->quick_fix_field for safety.
+ */
if (sel->cond && !sel->cond->fixed)
- sel->cond->fix_fields(join->thd, 0, &sel->cond);
+ sel->cond->quick_fix_field();
+
if (sel->test_quick_select(join->thd, tab->keys,
used_tables & ~ current_map,
(join->select_options &
@@ -7491,7 +7500,7 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field,
org_field->field_name, table,
org_field->charset());
else
- new_field= org_field->new_field(&thd->mem_root, table);
+ new_field= org_field->new_field(thd->mem_root, table);
if (new_field)
{
if (modify_item)
@@ -8094,7 +8103,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (!using_unique_constraint)
{
group->buff=(char*) group_buff;
- if (!(group->field=field->new_field(&thd->mem_root,table)))
+ if (!(group->field=field->new_field(thd->mem_root,table)))
goto err; /* purecov: inspected */
if (maybe_null)
{
@@ -11692,7 +11701,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
saved value
*/
Field *field= item->field;
- item->result_field=field->new_field(&thd->mem_root,field->table);
+ item->result_field=field->new_field(thd->mem_root,field->table);
char *tmp=(char*) sql_alloc(field->pack_length()+1);
if (!tmp)
goto err;
@@ -12139,7 +12148,7 @@ bool JOIN::rollup_init()
return 1;
rollup.ref_pointer_arrays= (Item***) (rollup.fields + send_group_parts);
ref_array= (Item**) (rollup.ref_pointer_arrays+send_group_parts);
- rollup.item_null= new (&thd->mem_root) Item_null();
+ rollup.item_null= new (thd->mem_root) Item_null();
/*
Prepare space for field list for the different levels
diff --git a/sql/sql_select.h b/sql/sql_select.h
index d489e911363..e1bf60f6896 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -426,7 +426,7 @@ class store_key :public Sql_alloc
field_arg->table, field_arg->charset());
else
{
- to_field=field_arg->new_field(&thd->mem_root,field_arg->table);
+ to_field=field_arg->new_field(thd->mem_root,field_arg->table);
if (to_field)
to_field->move_field(ptr, (uchar*) null, 1);
}
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 46f11683e4e..20d762eba55 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -314,27 +314,23 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result,
thd_arg->lex->current_select= lex_select_save;
if (!item_list.elements)
{
- Field **field;
- Item_arena backup;
- if (arena->is_conventional())
- arena= 0;
- else
- thd->set_n_backup_item_arena(arena, &backup);
+ Item_arena *tmp_arena,backup;
+ tmp_arena= thd->change_arena_if_needed(&backup);
for (field= table->field; *field; field++)
{
Item_field *item= new Item_field(*field);
if (!item || item_list.push_back(item))
{
- if (arena)
- thd->restore_backup_item_arena(arena, &backup);
+ if (tmp_arena)
+ thd->restore_backup_item_arena(tmp_arena, &backup);
DBUG_RETURN(-1);
}
}
- if (arena)
+ if (tmp_arena)
+ thd->restore_backup_item_arena(tmp_arena, &backup);
+ if (arena->is_stmt_prepare())
{
- thd->restore_backup_item_arena(arena, &backup);
-
/* prepare fake select to initialize it correctly */
ulong options_tmp= init_prepare_fake_select_lex(thd);
/*
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index c658d2ae016..82646e1db36 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -4781,7 +4781,7 @@ opt_distinct:
|DISTINCT { $$ = 1; };
opt_gconcat_separator:
- /* empty */ { $$ = new (&YYTHD->mem_root) String(",",1,default_charset_info); }
+ /* empty */ { $$ = new (YYTHD->mem_root) String(",",1,default_charset_info); }
|SEPARATOR_SYM text_string { $$ = $2; };
@@ -5070,15 +5070,15 @@ key_list_or_empty:
key_usage_list2:
key_usage_list2 ',' ident
{ Select->
- interval_list.push_back(new (&YYTHD->mem_root) String((const char*) $3.str, $3.length,
+ interval_list.push_back(new (YYTHD->mem_root) String((const char*) $3.str, $3.length,
system_charset_info)); }
| ident
{ Select->
- interval_list.push_back(new (&YYTHD->mem_root) String((const char*) $1.str, $1.length,
+ interval_list.push_back(new (YYTHD->mem_root) String((const char*) $1.str, $1.length,
system_charset_info)); }
| PRIMARY_SYM
{ Select->
- interval_list.push_back(new (&YYTHD->mem_root) String("PRIMARY", 7,
+ interval_list.push_back(new (YYTHD->mem_root) String("PRIMARY", 7,
system_charset_info)); };
using_list:
@@ -6128,7 +6128,7 @@ opt_db:
wild:
/* empty */
| LIKE TEXT_STRING_sys
- { Lex->wild= new (&YYTHD->mem_root) String($2.str, $2.length,
+ { Lex->wild= new (YYTHD->mem_root) String($2.str, $2.length,
system_charset_info); };
opt_full:
@@ -6182,7 +6182,7 @@ opt_describe_column:
/* empty */ {}
| text_string { Lex->wild= $1; }
| ident
- { Lex->wild= new (&YYTHD->mem_root) String((const char*) $1.str,$1.length,system_charset_info); };
+ { Lex->wild= new (YYTHD->mem_root) String((const char*) $1.str,$1.length,system_charset_info); };
/* flush things */
@@ -6428,7 +6428,7 @@ text_literal:
text_string:
TEXT_STRING_literal
- { $$= new (&YYTHD->mem_root) String($1.str,$1.length,YYTHD->variables.collation_connection); }
+ { $$= new (YYTHD->mem_root) String($1.str,$1.length,YYTHD->variables.collation_connection); }
| HEX_NUM
{
Item *tmp = new Item_varbinary($1.str,$1.length);
@@ -7700,7 +7700,7 @@ column_list:
column_list_id:
ident
{
- String *new_str = new (&YYTHD->mem_root) String((const char*) $1.str,$1.length,system_charset_info);
+ String *new_str = new (YYTHD->mem_root) String((const char*) $1.str,$1.length,system_charset_info);
List_iterator <LEX_COLUMN> iter(Lex->columns);
class LEX_COLUMN *point;
LEX *lex=Lex;
diff --git a/sql/table.cc b/sql/table.cc
index 0116cf180c1..71999b2c3d4 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -83,7 +83,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
uchar *null_pos;
uint null_bit, new_frm_ver, field_pack_length;
SQL_CRYPT *crypted=0;
- MEM_ROOT *old_root;
+ MEM_ROOT **root_ptr, *old_root;
DBUG_ENTER("openfrm");
DBUG_PRINT("enter",("name: '%s' form: 0x%lx",name,outparam));
@@ -123,7 +123,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
outparam->blob_ptr_size=sizeof(char*);
outparam->db_stat = db_stat;
init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0);
- my_pthread_setspecific_ptr(THR_MALLOC,&outparam->mem_root);
+ root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC);
+ old_root= *root_ptr;
+ *root_ptr= &outparam->mem_root;
outparam->real_name=strdup_root(&outparam->mem_root,
name+dirname_length(name));
@@ -274,9 +276,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
#ifdef HAVE_CRYPTED_FRM
else if (*(head+26) == 2)
{
- my_pthread_setspecific_ptr(THR_MALLOC,old_root);
+ *root_ptr= old_root
crypted=get_crypt_for_frm();
- my_pthread_setspecific_ptr(THR_MALLOC,&outparam->mem_root);
+ *root_ptr= &outparam->mem_root;
outparam->crypted=1;
}
#endif
@@ -762,7 +764,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
}
outparam->db_low_byte_first=outparam->file->low_byte_first();
- my_pthread_setspecific_ptr(THR_MALLOC,old_root);
+ *root_ptr= old_root;
thd->status_var.opened_tables++;
#ifndef DBUG_OFF
if (use_hash)
@@ -782,7 +784,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
err_end: /* Here when no file */
delete crypted;
- my_pthread_setspecific_ptr(THR_MALLOC,old_root);
+ *root_ptr= old_root;
frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG);
delete outparam->file;
outparam->file=0; // For easier errorchecking
diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc
index 0df60858bcb..3a9ca397bba 100644
--- a/sql/thr_malloc.cc
+++ b/sql/thr_malloc.cc
@@ -38,7 +38,7 @@ void init_sql_alloc(MEM_ROOT *mem_root, uint block_size, uint pre_alloc)
gptr sql_alloc(uint Size)
{
- MEM_ROOT *root=my_pthread_getspecific_ptr(MEM_ROOT*,THR_MALLOC);
+ MEM_ROOT *root= *my_pthread_getspecific_ptr(MEM_ROOT**,THR_MALLOC);
char *ptr= (char*) alloc_root(root,Size);
return ptr;
}