author      unknown <andrey@lmy004.>    2006-02-15 18:32:49 +0100
committer   unknown <andrey@lmy004.>    2006-02-15 18:32:49 +0100
commit      751ca01452d4c7cc2bc476ca0458b30b452972f6 (patch)
tree        8879fcf2a34269f345ac9088d3376012721a6a65 /sql
parent      79765df94413f6398fcaf0f5b1460fd141a4af96 (diff)
parent      2d94ee295b5bc3fa343449296d534bd373d2275a (diff)
download    mariadb-git-751ca01452d4c7cc2bc476ca0458b30b452972f6.tar.gz
Merge ahristov@bk-internal.mysql.com:/home/bk/mysql-5.1-new
into lmy004.:/work/mysql-5.1-bug17289
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_archive.cc            54
-rw-r--r--  sql/ha_archive.h             10
-rw-r--r--  sql/ha_myisammrg.cc           2
-rw-r--r--  sql/ha_ndbcluster.cc         17
-rw-r--r--  sql/ha_ndbcluster_binlog.cc  33
-rw-r--r--  sql/item_cmpfunc.cc           2
-rw-r--r--  sql/lock.cc                   2
-rw-r--r--  sql/log_event.cc              2
-rw-r--r--  sql/mysqld.cc                20
-rw-r--r--  sql/opt_range.cc              8
-rw-r--r--  sql/set_var.cc               35
-rw-r--r--  sql/set_var.h                17
-rw-r--r--  sql/slave.cc                  2
-rw-r--r--  sql/sql_cache.cc              6
-rw-r--r--  sql/sql_plugin.cc             6
-rw-r--r--  sql/sql_plugin.h              2
-rw-r--r--  sql/sql_select.cc             4
17 files changed, 144 insertions, 78 deletions
diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
index b6ae7a4b75f..8ed16949edd 100644
--- a/sql/ha_archive.cc
+++ b/sql/ha_archive.cc
@@ -126,10 +126,10 @@ static HASH archive_open_tables;
#define ARN ".ARN" // Files used during an optimize call
#define ARM ".ARM" // Meta file
/*
- uchar + uchar + ulonglong + ulonglong + ulonglong + uchar
+ uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
*/
#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
- + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
+ + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
/*
uchar + uchar
@@ -313,7 +313,8 @@ error:
*rows will contain the current number of rows in the data file upon success.
*/
int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
- ulonglong *auto_increment)
+ ulonglong *auto_increment,
+ ulonglong *forced_flushes)
{
uchar meta_buffer[META_BUFFER_SIZE];
uchar *ptr= meta_buffer;
@@ -336,12 +337,15 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
ptr+= sizeof(ulonglong); // Move past check_point
*auto_increment= uint8korr(ptr);
ptr+= sizeof(ulonglong); // Move past auto_increment
+ *forced_flushes= uint8korr(ptr);
+ ptr+= sizeof(ulonglong); // Move past forced_flush
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows));
DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
+ DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
@@ -359,7 +363,9 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
Upon ::open() we set to dirty, and upon ::close() we set to clean.
*/
int ha_archive::write_meta_file(File meta_file, ha_rows rows,
- ulonglong auto_increment, bool dirty)
+ ulonglong auto_increment,
+ ulonglong forced_flushes,
+ bool dirty)
{
uchar meta_buffer[META_BUFFER_SIZE];
uchar *ptr= meta_buffer;
@@ -377,6 +383,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
ptr += sizeof(ulonglong);
int8store(ptr, auto_increment);
ptr += sizeof(ulonglong);
+ int8store(ptr, forced_flushes);
+ ptr += sizeof(ulonglong);
*ptr= (uchar)dirty;
DBUG_PRINT("ha_archive::write_meta_file", ("Check %d",
(uint)ARCHIVE_CHECK_HEADER));
@@ -386,6 +394,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu",
auto_increment));
+ DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
+ forced_flushes));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
@@ -451,11 +461,14 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
leave it up to the user to fix.
*/
if (read_meta_file(share->meta_file, &share->rows_recorded,
- &share->auto_increment_value))
+ &share->auto_increment_value,
+ &share->forced_flushes))
share->crashed= TRUE;
else
(void)write_meta_file(share->meta_file, share->rows_recorded,
- share->auto_increment_value, TRUE);
+ share->auto_increment_value,
+ share->forced_flushes,
+ TRUE);
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
@@ -500,12 +513,18 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
hash_delete(&archive_open_tables, (byte*) share);
thr_lock_delete(&share->lock);
VOID(pthread_mutex_destroy(&share->mutex));
- if (share->crashed)
- (void)write_meta_file(share->meta_file, share->rows_recorded,
- share->auto_increment_value, TRUE);
- else
- (void)write_meta_file(share->meta_file, share->rows_recorded,
- share->auto_increment_value, FALSE);
+ /*
+ We need to make sure we don't reset the crashed state.
+ If we open a crashed file, we need to close it as crashed unless
+ it has been repaired.
+ Since we will close the data down after this, we go on and count
+ the flush on close;
+ */
+ share->forced_flushes++;
+ (void)write_meta_file(share->meta_file, share->rows_recorded,
+ share->auto_increment_value,
+ share->forced_flushes,
+ share->crashed ? TRUE :FALSE);
if (azclose(&(share->archive_write)))
rc= 1;
if (my_close(share->meta_file, MYF(0)))
@@ -657,7 +676,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
}
}
- write_meta_file(create_file, 0, auto_increment_value, FALSE);
+ write_meta_file(create_file, 0, auto_increment_value, 0, FALSE);
my_close(create_file,MYF(0));
/*
@@ -800,6 +819,7 @@ int ha_archive::write_row(byte *buf)
data
*/
azflush(&(share->archive_write), Z_SYNC_FLUSH);
+ share->forced_flushes++;
/*
Set the position of the local read thread to the beginning postion.
*/
@@ -897,6 +917,7 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
*/
pthread_mutex_lock(&share->mutex);
azflush(&(share->archive_write), Z_SYNC_FLUSH);
+ share->forced_flushes++;
pthread_mutex_unlock(&share->mutex);
/*
@@ -974,6 +995,7 @@ int ha_archive::rnd_init(bool scan)
{
DBUG_PRINT("info", ("archive flushing out rows for scan"));
azflush(&(share->archive_write), Z_SYNC_FLUSH);
+ share->forced_flushes++;
share->dirty= FALSE;
}
pthread_mutex_unlock(&share->mutex);
@@ -1149,6 +1171,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
/* Flush any waiting data */
azflush(&(share->archive_write), Z_SYNC_FLUSH);
+ share->forced_flushes++;
/* Lets create a file to contain the new data */
fn_format(writer_filename, share->table_name, "", ARN,
@@ -1233,13 +1256,15 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
goto error;
}
- while ((read= azread(&archive, block, IO_SIZE)))
+ while ((read= azread(&archive, block, IO_SIZE)) > 0)
azwrite(&writer, block, read);
}
azclose(&writer);
share->dirty= FALSE;
+ share->forced_flushes= 0;
azclose(&(share->archive_write));
+ DBUG_PRINT("info", ("Reopening archive data file"));
if (!(azopen(&(share->archive_write), share->data_file_name,
O_WRONLY|O_APPEND|O_BINARY)))
{
@@ -1421,6 +1446,7 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
thd->proc_info= "Checking table";
/* Flush any waiting data */
azflush(&(share->archive_write), Z_SYNC_FLUSH);
+ share->forced_flushes++;
/*
First we create a buffer that we can use for reading rows, and can pass
diff --git a/sql/ha_archive.h b/sql/ha_archive.h
index 7766ae0d2e9..9b351b7e8da 100644
--- a/sql/ha_archive.h
+++ b/sql/ha_archive.h
@@ -39,6 +39,8 @@ typedef struct st_archive_share {
bool crashed; /* Meta file is crashed */
ha_rows rows_recorded; /* Number of rows in tables */
ulonglong auto_increment_value;
+ ulonglong forced_flushes;
+ ulonglong mean_rec_length;
} ARCHIVE_SHARE;
/*
@@ -98,9 +100,13 @@ public:
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
int get_row(azio_stream *file_to_read, byte *buf);
- int read_meta_file(File meta_file, ha_rows *rows, ulonglong *auto_increment);
+ int read_meta_file(File meta_file, ha_rows *rows,
+ ulonglong *auto_increment,
+ ulonglong *forced_flushes);
int write_meta_file(File meta_file, ha_rows rows,
- ulonglong auto_increment, bool dirty);
+ ulonglong auto_increment,
+ ulonglong forced_flushes,
+ bool dirty);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
int free_share(ARCHIVE_SHARE *share);
bool auto_repair() const { return 1; } // For the moment we just do this
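The ha_archive hunks above widen the meta file by one 8-byte field (forced_flushes) and thread it through read_meta_file()/write_meta_file(). As a minimal, self-contained sketch of that on-disk layout (not the engine's actual code; store_le8()/load_le8() stand in for MySQL's int8store()/uint8korr(), and the header/version values are placeholders):

#include <cstdint>
#include <cstdio>

// Little-endian 8-byte store/fetch, mimicking int8store()/uint8korr().
static void store_le8(unsigned char *p, uint64_t v)
{
  for (int i= 0; i < 8; i++)
    p[i]= (unsigned char) (v >> (8 * i));
}

static uint64_t load_le8(const unsigned char *p)
{
  uint64_t v= 0;
  for (int i= 0; i < 8; i++)
    v|= (uint64_t) p[i] << (8 * i);
  return v;
}

// check + version + rows + check_point + auto_increment + forced_flushes + dirty
static const unsigned META_BUFFER_SIZE= 1 + 1 + 8 + 8 + 8 + 8 + 1;

int main()
{
  unsigned char meta[META_BUFFER_SIZE], *ptr= meta;
  *ptr++= 0xfe;                  // ARCHIVE_CHECK_HEADER (placeholder value)
  *ptr++= 2;                     // meta-file version (placeholder value)
  store_le8(ptr, 42); ptr+= 8;   // rows
  store_le8(ptr, 0);  ptr+= 8;   // check_point
  store_le8(ptr, 43); ptr+= 8;   // auto_increment
  store_le8(ptr, 7);  ptr+= 8;   // forced_flushes (the field added here)
  *ptr= 1;                       // dirty flag

  // forced_flushes sits right after the three earlier 8-byte fields.
  printf("forced_flushes read back: %llu\n",
         (unsigned long long) load_le8(meta + 2 + 3 * 8));
  return 0;
}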
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index cc40ca0b92b..32b67cd23e5 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -116,7 +116,7 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked)
DBUG_PRINT("info", ("ha_myisammrg::open exit %d", my_errno));
return (my_errno ? my_errno : -1);
}
- DBUG_PRINT("info", ("ha_myisammrg::open myrg_extrafunc..."))
+ DBUG_PRINT("info", ("ha_myisammrg::open myrg_extrafunc..."));
myrg_extrafunc(file, query_cache_invalidate_by_MyISAM_filename_ref);
if (!(test_if_locked == HA_OPEN_WAIT_IF_LOCKED ||
test_if_locked == HA_OPEN_ABORT_IF_LOCKED))
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 16fa0052214..dadb9811e87 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -2795,7 +2795,7 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
ndb_blob->getDefined(isNull);
if (isNull == 1)
{
- DBUG_PRINT("info",("[%u] NULL", col_no))
+ DBUG_PRINT("info",("[%u] NULL", col_no));
field->set_null(row_offset);
}
else if (isNull == -1)
@@ -2833,15 +2833,18 @@ void ha_ndbcluster::unpack_record(byte *buf)
const NDBCOL *hidden_col= tab->getColumn(hidden_no);
const NdbRecAttr* rec= m_value[hidden_no].rec;
DBUG_ASSERT(rec);
- DBUG_PRINT("hidden", ("%d: %s \"%llu\"", hidden_no,
+ DBUG_PRINT("hidden", ("%d: %s \"%llu\"", hidden_no,
hidden_col->getName(), rec->u_64_value()));
- }
- //print_results();
+ }
+ //DBUG_EXECUTE("value", print_results(););
#endif
}
/*
Utility function to print/dump the fetched field
+ to avoid unnecessary work, wrap in DBUG_EXECUTE as in:
+
+ DBUG_EXECUTE("value", print_results(););
*/
void ha_ndbcluster::print_results()
@@ -2849,8 +2852,6 @@ void ha_ndbcluster::print_results()
DBUG_ENTER("print_results");
#ifndef DBUG_OFF
- if (!_db_on_)
- DBUG_VOID_RETURN;
char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH];
String type(buf_type, sizeof(buf_type), &my_charset_bin);
@@ -6450,7 +6451,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
if (!is_autocommit)
{
- DBUG_PRINT("exit", ("Can't register table during transaction"))
+ DBUG_PRINT("exit", ("Can't register table during transaction"));
DBUG_RETURN(FALSE);
}
@@ -6458,7 +6459,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count))
{
*engine_data= 0;
- DBUG_PRINT("exit", ("Error, could not get commitcount"))
+ DBUG_PRINT("exit", ("Error, could not get commitcount"));
DBUG_RETURN(FALSE);
}
*engine_data= commit_count;
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index b349e3320de..fadffcbb1d3 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -101,24 +101,21 @@ static TABLE_LIST binlog_tables;
#ifndef DBUG_OFF
static void print_records(TABLE *table, const char *record)
{
- if (_db_on_)
+ for (uint j= 0; j < table->s->fields; j++)
{
- for (uint j= 0; j < table->s->fields; j++)
+ char buf[40];
+ int pos= 0;
+ Field *field= table->field[j];
+ const byte* field_ptr= field->ptr - table->record[0] + record;
+ int pack_len= field->pack_length();
+ int n= pack_len < 10 ? pack_len : 10;
+
+ for (int i= 0; i < n && pos < 20; i++)
{
- char buf[40];
- int pos= 0;
- Field *field= table->field[j];
- const byte* field_ptr= field->ptr - table->record[0] + record;
- int pack_len= field->pack_length();
- int n= pack_len < 10 ? pack_len : 10;
-
- for (int i= 0; i < n && pos < 20; i++)
- {
- pos+= sprintf(&buf[pos]," %x", (int) (unsigned char) field_ptr[i]);
- }
- buf[pos]= 0;
- DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf));
+ pos+= sprintf(&buf[pos]," %x", (int) (unsigned char) field_ptr[i]);
}
+ buf[pos]= 0;
+ DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf));
}
}
#else
@@ -2490,7 +2487,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
- print_records(table, table->record[n]);
+ DBUG_EXECUTE("info", print_records(table, table->record[n]););
trans.delete_row(::server_id, injector::transaction::table(table, true),
&b, n_fields, table->record[n]);
}
@@ -2509,7 +2506,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
}
ndb_unpack_record(table, share->ndb_value[0],
&b, table->record[0]);
- print_records(table, table->record[0]);
+ DBUG_EXECUTE("info", print_records(table, table->record[0]););
if (table->s->primary_key != MAX_KEY)
{
/*
@@ -2534,7 +2531,7 @@ ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
DBUG_ASSERT(ret == 0);
}
ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
- print_records(table, table->record[1]);
+ DBUG_EXECUTE("info", print_records(table, table->record[1]););
trans.update_row(::server_id,
injector::transaction::table(table, true),
&b, n_fields,
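Several hunks above (and later ones in opt_range.cc and sql_select.cc) replace explicit checks of the removed _db_on_ flag with DBUG_EXECUTE wrappers around debug-only dump calls. A hedged sketch of that pattern, assuming MySQL's dbug facility (my_dbug.h) is available; dump_row() is a hypothetical stand-in for print_records()/print_results():

#include <my_dbug.h>

static void dump_row(const char *row)
{
  DBUG_PRINT("info", ("row: %s", row));      // emitted only when tracing is on
}

static void handle_event(const char *row)
{
  DBUG_ENTER("handle_event");
  /* old style: if (_db_on_) dump_row(row);
     new style: the call runs only when the "info" keyword is active */
  DBUG_EXECUTE("info", dump_row(row););
  DBUG_VOID_RETURN;
}

int main()
{
  DBUG_PUSH("d,info");                       // enable the "info" keyword
  handle_event("0x1a 0x2b");
  DBUG_POP();
  return 0;
}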
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index c22213e774a..2014f646356 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -2861,7 +2861,7 @@ longlong Item_is_not_null_test::val_int()
}
if (args[0]->is_null())
{
- DBUG_PRINT("info", ("null"))
+ DBUG_PRINT("info", ("null"));
owner->was_null|= 1;
DBUG_RETURN(0);
}
diff --git a/sql/lock.cc b/sql/lock.cc
index 5f1141cc841..9cd0dcce610 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -622,7 +622,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count,
table_ptr[i], count,
(thd == logger.get_general_log_thd()) ||
(thd == logger.get_slow_log_thd())))
- return 0;
+ DBUG_RETURN(0);
}
if (!(sql_lock= (MYSQL_LOCK*)
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 3dfcc63c33b..cd16745df90 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -5659,7 +5659,7 @@ Table_map_log_event::Table_map_log_event(THD *thd, TABLE *tbl, ulong tid,
m_data_size= TABLE_MAP_HEADER_LEN;
- DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", m_data_size= 6;)
+ DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", m_data_size= 6;);
m_data_size+= m_dblen + 2; // Include length and terminating \0
m_data_size+= m_tbllen + 2; // Include length and terminating \0
m_data_size+= 1 + m_colcnt; // COLCNT and column types
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 2b04d45064d..6e70265c8bc 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1094,12 +1094,8 @@ pthread_handler_t kill_server_thread(void *arg __attribute__((unused)))
extern "C" sig_handler print_signal_warning(int sig)
{
- if (!DBUG_IN_USE)
- {
- if (global_system_variables.log_warnings)
- sql_print_warning("Got signal %d from thread %d",
- sig,my_thread_id());
- }
+ if (global_system_variables.log_warnings)
+ sql_print_warning("Got signal %d from thread %d", sig,my_thread_id());
#ifdef DONT_REMEMBER_SIGNAL
my_sigset(sig,print_signal_warning); /* int. thread system calls */
#endif
@@ -1720,7 +1716,7 @@ void end_thread(THD *thd, bool put_in_cache)
! abort_loop && !kill_cached_threads)
{
/* Don't kill the thread, just put it in cache for reuse */
- DBUG_PRINT("info", ("Adding thread to cache"))
+ DBUG_PRINT("info", ("Adding thread to cache"));
cached_thread_count++;
while (!abort_loop && ! wake_thread && ! kill_cached_threads)
(void) pthread_cond_wait(&COND_thread_cache, &LOCK_thread_count);
@@ -1741,13 +1737,13 @@ void end_thread(THD *thd, bool put_in_cache)
}
}
- DBUG_PRINT("info", ("sending a broadcast"))
+ DBUG_PRINT("info", ("sending a broadcast"));
/* Tell main we are ready */
(void) pthread_mutex_unlock(&LOCK_thread_count);
/* It's safe to broadcast outside a lock (COND... is not deleted here) */
(void) pthread_cond_broadcast(&COND_thread_count);
- DBUG_PRINT("info", ("unlocked thread_count mutex"))
+ DBUG_PRINT("info", ("unlocked thread_count mutex"));
#ifdef ONE_THREAD
if (!(test_flags & TEST_NO_THREADS)) // For debugging under Linux
#endif
@@ -3484,8 +3480,6 @@ int win_main(int argc, char **argv)
int main(int argc, char **argv)
#endif
{
- DEBUGGER_OFF;
-
rpl_filter= new Rpl_filter;
binlog_filter= new Rpl_filter;
if (!rpl_filter || !binlog_filter)
@@ -7076,7 +7070,6 @@ static void mysql_init_variables(void)
max_system_variables.max_join_size= (ulonglong) HA_POS_ERROR;
global_system_variables.old_passwords= 0;
global_system_variables.old_alter_table= 0;
-
/*
Default behavior for 4.1 and 5.0 is to treat NULL values as unequal
when collecting index statistics for MyISAM tables.
@@ -7178,7 +7171,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
switch(optid) {
case '#':
#ifndef DBUG_OFF
- DBUG_PUSH(argument ? argument : default_dbug_option);
+ DBUG_SET(argument ? argument : default_dbug_option);
+ DBUG_SET_INITIAL(argument ? argument : default_dbug_option);
#endif
opt_endinfo=1; /* unireg: memory allocation */
break;
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index c0f1abe597c..ef6b3d941a1 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -10163,8 +10163,6 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
int idx;
char buff[1024];
DBUG_ENTER("print_sel_tree");
- if (! _db_on_)
- DBUG_VOID_RETURN;
String tmp(buff,sizeof(buff),&my_charset_bin);
tmp.length(0);
@@ -10193,9 +10191,7 @@ static void print_ror_scans_arr(TABLE *table, const char *msg,
struct st_ror_scan_info **start,
struct st_ror_scan_info **end)
{
- DBUG_ENTER("print_ror_scans");
- if (! _db_on_)
- DBUG_VOID_RETURN;
+ DBUG_ENTER("print_ror_scans_arr");
char buff[1024];
String tmp(buff,sizeof(buff),&my_charset_bin);
@@ -10259,7 +10255,7 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
{
char buf[MAX_KEY/8+1];
DBUG_ENTER("print_quick");
- if (! _db_on_ || !quick)
+ if (!quick)
DBUG_VOID_RETURN;
DBUG_LOCK_FILE;
diff --git a/sql/set_var.cc b/sql/set_var.cc
index f082e893205..c9ba454f80c 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -205,6 +205,9 @@ sys_var_long_ptr sys_concurrent_insert("concurrent_insert",
&myisam_concurrent_insert);
sys_var_long_ptr sys_connect_timeout("connect_timeout",
&connect_timeout);
+#ifndef DBUG_OFF
+sys_var_thd_dbug sys_dbug("debug");
+#endif
sys_var_enum sys_delay_key_write("delay_key_write",
&delay_key_write_options,
&delay_key_write_typelib,
@@ -720,13 +723,16 @@ SHOW_VAR init_vars[]= {
{"datadir", mysql_real_data_home, SHOW_CHAR},
{sys_date_format.name, (char*) &sys_date_format, SHOW_SYS},
{sys_datetime_format.name, (char*) &sys_datetime_format, SHOW_SYS},
+#ifndef DBUG_OFF
+ {sys_dbug.name, (char*) &sys_dbug, SHOW_SYS},
+#endif
{sys_default_week_format.name, (char*) &sys_default_week_format, SHOW_SYS},
{sys_delay_key_write.name, (char*) &sys_delay_key_write, SHOW_SYS},
{sys_delayed_insert_limit.name, (char*) &sys_delayed_insert_limit,SHOW_SYS},
{sys_delayed_insert_timeout.name, (char*) &sys_delayed_insert_timeout, SHOW_SYS},
{sys_delayed_queue_size.name,(char*) &sys_delayed_queue_size, SHOW_SYS},
{sys_div_precincrement.name,(char*) &sys_div_precincrement,SHOW_SYS},
- {sys_engine_condition_pushdown.name,
+ {sys_engine_condition_pushdown.name,
(char*) &sys_engine_condition_pushdown, SHOW_SYS},
{sys_event_executor.name, (char*) &sys_event_executor, SHOW_SYS},
{sys_expire_logs_days.name, (char*) &sys_expire_logs_days, SHOW_SYS},
@@ -3457,6 +3463,33 @@ bool sys_var_trust_routine_creators::update(THD *thd, set_var *var)
return sys_var_bool_ptr::update(thd, var);
}
+/* even session variable here requires SUPER, because of -#o,file */
+bool sys_var_thd_dbug::check(THD *thd, set_var *var)
+{
+ return check_global_access(thd, SUPER_ACL);
+}
+
+bool sys_var_thd_dbug::update(THD *thd, set_var *var)
+{
+ if (var->type == OPT_GLOBAL)
+ DBUG_SET_INITIAL(var ? var->value->str_value.c_ptr() : "");
+ else
+ {
+ DBUG_POP();
+ DBUG_PUSH(var ? var->value->str_value.c_ptr() : "");
+ }
+ return 0;
+}
+
+byte *sys_var_thd_dbug::value_ptr(THD *thd, enum_var_type type, LEX_STRING *b)
+{
+ char buf[256];
+ if (type == OPT_GLOBAL)
+ DBUG_EXPLAIN_INITIAL(buf, sizeof(buf));
+ else
+ DBUG_EXPLAIN(buf, sizeof(buf));
+ (byte*) thd->strdup(buf);
+}
/****************************************************************************
Used templates
diff --git a/sql/set_var.h b/sql/set_var.h
index 34873947483..75c36176f91 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -45,7 +45,7 @@ public:
struct my_option *option_limits; /* Updated by by set_var_init() */
uint name_length; /* Updated by by set_var_init() */
const char *name;
-
+
sys_after_update_func after_update;
bool no_support_one_shot;
sys_var(const char *name_arg)
@@ -413,7 +413,7 @@ class sys_var_thd_bit :public sys_var_thd
public:
ulong bit_flag;
bool reverse;
- sys_var_thd_bit(const char *name_arg,
+ sys_var_thd_bit(const char *name_arg,
sys_check_func c_func, sys_update_func u_func,
ulong bit, bool reverse_arg=0)
:sys_var_thd(name_arg), check_func(c_func), update_func(u_func),
@@ -427,6 +427,19 @@ public:
byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
};
+class sys_var_thd_dbug :public sys_var_thd
+{
+public:
+ sys_var_thd_dbug(const char *name_arg) :sys_var_thd(name_arg) {}
+ bool check_update_type(Item_result type) { return type != STRING_RESULT; }
+ bool check(THD *thd, set_var *var);
+ SHOW_TYPE type() { return SHOW_CHAR; }
+ bool update(THD *thd, set_var *var);
+ void set_default(THD *thd, enum_var_type type) { DBUG_POP(); }
+ byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *b);
+};
+
+
/* some variables that require special handling */
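The new sys_var_thd_dbug ("debug") variable manipulates the per-thread dbug state directly: a session-level SET pops the current settings and pushes the new control string, the global form goes through DBUG_SET_INITIAL, and value_ptr() reads the active state back with DBUG_EXPLAIN. A hedged sketch of those underlying calls, again assuming MySQL's dbug facility (my_dbug.h); the control string is only an illustrative value:

#include <my_dbug.h>
#include <cstdio>

int main()
{
  char buf[256];

  DBUG_PUSH("d:t:o,/tmp/mysqld.trace");  // roughly what a session-level update does
  DBUG_EXPLAIN(buf, sizeof(buf));        // what value_ptr() uses to show the state
  printf("active dbug state: %s\n", buf);

  DBUG_POP();                            // what set_default() does
  return 0;
}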
diff --git a/sql/slave.cc b/sql/slave.cc
index 763db31c05d..e60521af3a0 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -2920,7 +2920,7 @@ static int has_temporary_error(THD *thd)
MYSQL_ERROR *err;
while ((err= it++))
{
- DBUG_PRINT("info", ("has warning %d %s", err->code, err->msg))
+ DBUG_PRINT("info", ("has warning %d %s", err->code, err->msg));
switch (err->code)
{
case ER_GET_TEMPORARY_ERRMSG:
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index aec370e104a..5b060aa13c6 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -316,13 +316,13 @@ TODO list:
#define MUTEX_UNLOCK(M) {DBUG_PRINT("lock", ("mutex unlock 0x%lx",\
(ulong)(M))); pthread_mutex_unlock(M);}
#define RW_WLOCK(M) {DBUG_PRINT("lock", ("rwlock wlock 0x%lx",(ulong)(M))); \
- if (!rw_wrlock(M)) DBUG_PRINT("lock", ("rwlock wlock ok")) \
+ if (!rw_wrlock(M)) DBUG_PRINT("lock", ("rwlock wlock ok")); \
else DBUG_PRINT("lock", ("rwlock wlock FAILED %d", errno)); }
#define RW_RLOCK(M) {DBUG_PRINT("lock", ("rwlock rlock 0x%lx", (ulong)(M))); \
- if (!rw_rdlock(M)) DBUG_PRINT("lock", ("rwlock rlock ok")) \
+ if (!rw_rdlock(M)) DBUG_PRINT("lock", ("rwlock rlock ok")); \
else DBUG_PRINT("lock", ("rwlock wlock FAILED %d", errno)); }
#define RW_UNLOCK(M) {DBUG_PRINT("lock", ("rwlock unlock 0x%lx",(ulong)(M))); \
- if (!rw_unlock(M)) DBUG_PRINT("lock", ("rwlock unlock ok")) \
+ if (!rw_unlock(M)) DBUG_PRINT("lock", ("rwlock unlock ok")); \
else DBUG_PRINT("lock", ("rwlock unlock FAILED %d", errno)); }
#define STRUCT_LOCK(M) {DBUG_PRINT("lock", ("%d struct lock...",__LINE__)); \
pthread_mutex_lock(M);DBUG_PRINT("lock", ("struct lock OK"));}
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 8c5579d978b..199cf4a6264 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -518,7 +518,7 @@ static int plugin_initialize(struct st_plugin_int *plugin)
sql_print_error("Plugin '%s' init function returned error.",
plugin->name.str);
DBUG_PRINT("warning", ("Plugin '%s' init function returned error.",
- plugin->name.str))
+ plugin->name.str));
goto err;
}
}
@@ -531,7 +531,7 @@ static int plugin_initialize(struct st_plugin_int *plugin)
sql_print_error("Plugin '%s' handlerton init returned error.",
plugin->name.str);
DBUG_PRINT("warning", ("Plugin '%s' handlerton init returned error.",
- plugin->name.str))
+ plugin->name.str));
goto err;
}
break;
@@ -580,7 +580,7 @@ static void plugin_call_deinitializer(void)
if (tmp->plugin->deinit())
{
DBUG_PRINT("warning", ("Plugin '%s' deinit function returned error.",
- tmp->name.str))
+ tmp->name.str));
}
}
tmp->state= PLUGIN_IS_UNINITIALIZED;
diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h
index 52bfc44496b..672db105cd1 100644
--- a/sql/sql_plugin.h
+++ b/sql/sql_plugin.h
@@ -23,7 +23,7 @@
*/
#define SHOW_FUNC SHOW_FUNC, SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_LONGLONG, \
SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, SHOW_HAVE, \
- SHOW_HA_ROWS, SHOW_SYS, SHOW_LONG_NOFLUSH
+ SHOW_MY_BOOL, SHOW_HA_ROWS, SHOW_SYS, SHOW_LONG_NOFLUSH
#include <mysql/plugin.h>
#undef SHOW_FUNC
typedef enum enum_mysql_show_type SHOW_TYPE;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index fad382e46b0..5d01980025e 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -13228,7 +13228,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
for (i= 0; (item= it++); i++)
{
Field *field;
-
+
if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM)
item_field= item;
else
@@ -13247,7 +13247,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
DBUG_RETURN(TRUE); // Fatal error
item_field->name= item->name;
#ifndef DBUG_OFF
- if (_db_on_ && !item_field->name)
+ if (!item_field->name)
{
char buff[256];
String str(buff,sizeof(buff),&my_charset_bin);