author     unknown <serg@serg.mylan>    2005-03-29 14:58:03 +0200
committer  unknown <serg@serg.mylan>    2005-03-29 14:58:03 +0200
commit     37695aaf35e39e186bffacf41009671ce58ab529 (patch)
tree       9abe8a641c1d3796b1e0b420a1b8b2d85a9bdd0a /sql
parent     a004c5101586d5c522a52c7cb84933bbd8037a4a (diff)
parent     939901661c31f51ec4426c37435a56e663ba9826 (diff)
download   mariadb-git-37695aaf35e39e186bffacf41009671ce58ab529.tar.gz
Merge bk-internal:/home/bk/mysql-5.0
into serg.mylan:/usr/home/serg/Abk/mysql-5.0
Diffstat (limited to 'sql')
-rw-r--r--  sql/Makefile.am           5
-rw-r--r--  sql/ha_blackhole.cc     188
-rw-r--r--  sql/ha_blackhole.h       88
-rw-r--r--  sql/handler.cc            9
-rw-r--r--  sql/handler.h             2
-rw-r--r--  sql/item_func.cc         17
-rw-r--r--  sql/item_func.h           3
-rw-r--r--  sql/item_strfunc.h        4
-rw-r--r--  sql/log_event.cc         88
-rw-r--r--  sql/log_event.h          16
-rw-r--r--  sql/mysql_priv.h          1
-rw-r--r--  sql/mysqld.cc            66
-rw-r--r--  sql/set_var.cc            1
-rw-r--r--  sql/share/errmsg.txt      4
-rw-r--r--  sql/slave.cc             24
-rw-r--r--  sql/slave.h               9
-rw-r--r--  sql/sql_acl.cc            8
-rw-r--r--  sql/sql_load.cc          42
-rw-r--r--  sql/sql_prepare.cc       14
-rw-r--r--  sql/sql_select.cc         5
-rw-r--r--  sql/sql_select.h          2
-rw-r--r--  sql/sql_show.cc         142
-rw-r--r--  sql/sql_union.cc          2
-rw-r--r--  sql/structs.h             2
-rw-r--r--  sql/table.h               2
25 files changed, 610 insertions(+), 134 deletions(-)
diff --git a/sql/Makefile.am b/sql/Makefile.am
index bd371cd77bf..e0ff324b33c 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -62,9 +62,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \
parse_file.h sql_view.h sql_trigger.h \
examples/ha_example.h examples/ha_archive.h \
- examples/ha_tina.h \
+ examples/ha_tina.h ha_blackhole.h \
ha_federated.h
-
mysqld_SOURCES = sql_lex.cc sql_handler.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
@@ -99,7 +98,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \
sp_cache.cc parse_file.cc sql_trigger.cc \
examples/ha_example.cc examples/ha_archive.cc \
- examples/ha_tina.cc \
+ examples/ha_tina.cc ha_blackhole.cc \
ha_federated.cc
gen_lex_hash_SOURCES = gen_lex_hash.cc
diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc
new file mode 100644
index 00000000000..e34d5d723a4
--- /dev/null
+++ b/sql/ha_blackhole.cc
@@ -0,0 +1,188 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#ifdef __GNUC__
+#pragma implementation // gcc: Class implementation
+#endif
+
+#include <mysql_priv.h>
+
+#ifdef HAVE_BLACKHOLE_DB
+#include "ha_blackhole.h"
+
+
+const char **ha_blackhole::bas_ext() const
+{
+ static const char *ext[]= { NullS };
+ return ext;
+}
+
+int ha_blackhole::open(const char *name, int mode, uint test_if_locked)
+{
+ DBUG_ENTER("ha_blackhole::open");
+ thr_lock_init(&thr_lock);
+ thr_lock_data_init(&thr_lock,&lock,NULL);
+ DBUG_RETURN(0);
+}
+
+int ha_blackhole::close(void)
+{
+ DBUG_ENTER("ha_blackhole::close");
+ thr_lock_delete(&thr_lock);
+ DBUG_RETURN(0);
+}
+
+int ha_blackhole::create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info)
+{
+ DBUG_ENTER("ha_blackhole::create");
+ DBUG_RETURN(0);
+}
+
+const char *ha_blackhole::index_type(uint key_number)
+{
+ DBUG_ENTER("ha_blackhole::index_type");
+ DBUG_RETURN((table->key_info[key_number].flags & HA_FULLTEXT) ?
+ "FULLTEXT" :
+ (table->key_info[key_number].flags & HA_SPATIAL) ?
+ "SPATIAL" :
+ (table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ?
+ "RTREE" :
+ "BTREE");
+}
+
+int ha_blackhole::write_row(byte * buf)
+{
+ DBUG_ENTER("ha_blackhole::write_row");
+ DBUG_RETURN(0);
+}
+
+int ha_blackhole::rnd_init(bool scan)
+{
+ DBUG_ENTER("ha_blackhole::rnd_init");
+ DBUG_RETURN(0);
+}
+
+
+int ha_blackhole::rnd_next(byte *buf)
+{
+ DBUG_ENTER("ha_blackhole::rnd_next");
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+
+int ha_blackhole::rnd_pos(byte * buf, byte *pos)
+{
+ DBUG_ENTER("ha_blackhole::rnd_pos");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(0);
+}
+
+
+void ha_blackhole::position(const byte *record)
+{
+ DBUG_ENTER("ha_blackhole::position");
+ DBUG_ASSERT(0);
+ DBUG_VOID_RETURN;
+}
+
+
+void ha_blackhole::info(uint flag)
+{
+ DBUG_ENTER("ha_blackhole::info");
+
+ records= 0;
+ deleted= 0;
+ errkey= 0;
+ mean_rec_length= 0;
+ data_file_length= 0;
+ index_file_length= 0;
+ max_data_file_length= 0;
+ delete_length= 0;
+ if (flag & HA_STATUS_AUTO)
+ auto_increment_value= 1;
+ DBUG_VOID_RETURN;
+}
+
+int ha_blackhole::external_lock(THD *thd, int lock_type)
+{
+ DBUG_ENTER("ha_blackhole::external_lock");
+ DBUG_RETURN(0);
+}
+
+
+THR_LOCK_DATA **ha_blackhole::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ *to++= &lock;
+
+ return to;
+}
+
+
+int ha_blackhole::index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag)
+{
+ DBUG_ENTER("ha_blackhole::index_read");
+ DBUG_RETURN(0);
+}
+
+
+int ha_blackhole::index_read_idx(byte * buf, uint idx, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag)
+{
+ DBUG_ENTER("ha_blackhole::index_read_idx");
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+
+int ha_blackhole::index_read_last(byte * buf, const byte * key, uint key_len)
+{
+ DBUG_ENTER("ha_blackhole::index_read_last");
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+
+int ha_blackhole::index_next(byte * buf)
+{
+ DBUG_ENTER("ha_blackhole::index_next");
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+
+int ha_blackhole::index_prev(byte * buf)
+{
+ DBUG_ENTER("ha_blackhole::index_prev");
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+
+int ha_blackhole::index_first(byte * buf)
+{
+ DBUG_ENTER("ha_blackhole::index_first");
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+
+int ha_blackhole::index_last(byte * buf)
+{
+ DBUG_ENTER("ha_blackhole::index_last");
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+}
+
+#endif /* HAVE_BLACKHOLE_DB */
diff --git a/sql/ha_blackhole.h b/sql/ha_blackhole.h
new file mode 100644
index 00000000000..b6f924e94b9
--- /dev/null
+++ b/sql/ha_blackhole.h
@@ -0,0 +1,88 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef __GNUC__
+#pragma interface /* gcc class implementation */
+#endif
+
+/*
+ Class definition for the blackhole storage engine
+ "Dumbest named feature ever"
+*/
+class ha_blackhole: public handler
+{
+ THR_LOCK_DATA lock; /* MySQL lock */
+ THR_LOCK thr_lock;
+
+public:
+ ha_blackhole(TABLE *table): handler(table)
+ {
+ }
+ ~ha_blackhole()
+ {
+ }
+ /* The name that will be used for display purposes */
+ const char *table_type() const { return "BLACKHOLE"; }
+ /*
+ The name of the index type that will be used for display
+ don't implement this method unless you really have indexes
+ */
+ const char *index_type(uint key_number);
+ const char **bas_ext() const;
+ ulong table_flags() const
+ {
+ return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
+ HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
+ HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
+ HA_CAN_INSERT_DELAYED);
+ }
+ ulong index_flags(uint inx, uint part, bool all_parts) const
+ {
+ return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
+ 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
+ HA_READ_ORDER | HA_KEYREAD_ONLY);
+ }
+ /* The following defines can be increased if necessary */
+#define BLACKHOLE_MAX_KEY 64 /* Max allowed keys */
+#define BLACKHOLE_MAX_KEY_SEG 16 /* Max segments for key */
+#define BLACKHOLE_MAX_KEY_LENGTH 1000
+ uint max_supported_keys() const { return BLACKHOLE_MAX_KEY; }
+ uint max_supported_key_length() const { return BLACKHOLE_MAX_KEY_LENGTH; }
+ uint max_supported_key_part_length() const { return BLACKHOLE_MAX_KEY_LENGTH; }
+ int open(const char *name, int mode, uint test_if_locked);
+ int close(void);
+ int write_row(byte * buf);
+ int rnd_init(bool scan);
+ int rnd_next(byte *buf);
+ int rnd_pos(byte * buf, byte *pos);
+ int index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_read_idx(byte * buf, uint idx, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_read_last(byte * buf, const byte * key, uint key_len);
+ int index_next(byte * buf);
+ int index_prev(byte * buf);
+ int index_first(byte * buf);
+ int index_last(byte * buf);
+ void position(const byte *record);
+ void info(uint flag);
+ int external_lock(THD *thd, int lock_type);
+ int create(const char *name, TABLE *table_arg,
+ HA_CREATE_INFO *create_info);
+ THR_LOCK_DATA **store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type);
+};
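
The two files above make up the whole engine: table_flags()/index_flags() advertise broad capabilities, write_row() accepts and discards every row, and all scan/index read methods report end-of-file, so the table always appears empty. A minimal, self-contained sketch of that contract (the NullStore class and kEndOfFile constant are stand-ins invented here, not the real handler API):

#include <cstdio>
#include <string>

// Stand-in error code; the real server defines HA_ERR_END_OF_FILE elsewhere.
static const int kEndOfFile = 137;

// Hypothetical "null storage" table: accepts writes, never returns rows.
class NullStore
{
public:
  int write_row(const std::string & /*row*/) { return 0; }          // accept and discard
  int rnd_init(bool /*scan*/)                { return 0; }          // nothing to prepare
  int rnd_next(std::string * /*row*/)        { return kEndOfFile; } // table is always empty
  int index_read(const std::string & /*key*/, std::string * /*row*/)
  { return kEndOfFile; }                                            // same for index lookups
};

int main()
{
  NullStore t;
  t.write_row("abc");                       // INSERT succeeds...
  std::string row;
  if (t.rnd_next(&row) == kEndOfFile)       // ...but SELECT finds nothing
    std::printf("table is empty, as expected\n");
  return 0;
}
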
diff --git a/sql/handler.cc b/sql/handler.cc
index 53d4c4dfa4f..4a01003f418 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -32,6 +32,9 @@
#ifdef HAVE_BERKELEY_DB
#include "ha_berkeley.h"
#endif
+#ifdef HAVE_BLACKHOLE_DB
+#include "ha_blackhole.h"
+#endif
#ifdef HAVE_EXAMPLE_DB
#include "examples/ha_example.h"
#endif
@@ -103,6 +106,8 @@ struct show_table_type_st sys_table_types[]=
"CSV storage engine", DB_TYPE_CSV_DB},
{"FEDERATED",&have_federated_db,
"Federated MySQL storage engine", DB_TYPE_FEDERATED_DB},
+ {"BLACKHOLE",&have_blackhole_db,
+ "Storage engine designed to act as null storage", DB_TYPE_BLACKHOLE_DB},
{NullS, NULL, NullS, DB_TYPE_UNKNOWN}
};
@@ -211,6 +216,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
case DB_TYPE_ARCHIVE_DB:
return new ha_archive(table);
#endif
+#ifdef HAVE_BLACKHOLE_DB
+ case DB_TYPE_BLACKHOLE_DB:
+ return new ha_blackhole(table);
+#endif
#ifdef HAVE_FEDERATED_DB
case DB_TYPE_FEDERATED_DB:
return new ha_federated(table);
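
The handler.cc hunks register the new engine in sys_table_types and extend the get_new_handler() factory switch. As an illustration only, a stripped-down factory in the same spirit (the enum values and classes below are stand-ins, not the server's real declarations):

#include <memory>
#include <string>

// Illustrative engine ids and a trivial handler hierarchy.
enum db_type { DB_TYPE_MYISAM, DB_TYPE_BLACKHOLE_DB, DB_TYPE_UNKNOWN };

struct handler              { virtual ~handler() {} virtual std::string name() const = 0; };
struct ha_myisam : handler    { std::string name() const override { return "MyISAM"; } };
struct ha_blackhole : handler { std::string name() const override { return "BLACKHOLE"; } };

// Factory in the spirit of get_new_handler(): unknown engines fall back to a default.
static std::unique_ptr<handler> get_new_handler(db_type t)
{
  switch (t)
  {
  case DB_TYPE_BLACKHOLE_DB:
    return std::unique_ptr<handler>(new ha_blackhole());
  case DB_TYPE_MYISAM:
  default:
    return std::unique_ptr<handler>(new ha_myisam());
  }
}

int main()
{
  return get_new_handler(DB_TYPE_BLACKHOLE_DB)->name() == "BLACKHOLE" ? 0 : 1;
}
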
diff --git a/sql/handler.h b/sql/handler.h
index f6d876fe0ad..e5b63c7f25c 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -160,7 +160,7 @@ enum db_type
DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER,
DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB,
DB_TYPE_FEDERATED_DB,
-
+ DB_TYPE_BLACKHOLE_DB,
DB_TYPE_DEFAULT // Must be last
};
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 3be99479e67..c607efa0797 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -2772,8 +2772,8 @@ String *Item_func_udf_str::val_str(String *str)
/*
- This has to come last in the udf_handler methods, or the compiler for IBM
- AIX fails to compile with debugging enabled. (Yes, really.)
+ This has to come last in the udf_handler methods, or C for AIX
+ version 6.0.0.0 fails to compile with debugging enabled. (Yes, really.)
*/
udf_handler::~udf_handler()
@@ -3569,20 +3569,20 @@ Item_func_set_user_var::update()
case REAL_RESULT:
{
res= update_hash((void*) &save_result.vreal,sizeof(save_result.vreal),
- REAL_RESULT, &my_charset_bin, DERIVATION_NONE);
+ REAL_RESULT, &my_charset_bin, DERIVATION_IMPLICIT);
break;
}
case INT_RESULT:
{
res= update_hash((void*) &save_result.vint, sizeof(save_result.vint),
- INT_RESULT, &my_charset_bin, DERIVATION_NONE);
+ INT_RESULT, &my_charset_bin, DERIVATION_IMPLICIT);
break;
}
case STRING_RESULT:
{
if (!save_result.vstr) // Null value
res= update_hash((void*) 0, 0, STRING_RESULT, &my_charset_bin,
- DERIVATION_NONE);
+ DERIVATION_IMPLICIT);
else
res= update_hash((void*) save_result.vstr->ptr(),
save_result.vstr->length(), STRING_RESULT,
@@ -3594,11 +3594,11 @@ Item_func_set_user_var::update()
{
if (!save_result.vdec) // Null value
res= update_hash((void*) 0, 0, DECIMAL_RESULT, &my_charset_bin,
- DERIVATION_NONE);
+ DERIVATION_IMPLICIT);
else
res= update_hash((void*) save_result.vdec,
sizeof(my_decimal), DECIMAL_RESULT,
- &my_charset_bin, DERIVATION_NONE);
+ &my_charset_bin, DERIVATION_IMPLICIT);
break;
}
case ROW_RESULT:
@@ -3850,7 +3850,10 @@ void Item_func_get_user_var::fix_length_and_dec()
}
}
else
+ {
+ collation.set(&my_charset_bin, DERIVATION_IMPLICIT);
null_value= 1;
+ }
if (error)
thd->fatal_error();
diff --git a/sql/item_func.h b/sql/item_func.h
index 93633e75619..9bf21fa1aa3 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -704,7 +704,8 @@ public:
Item_func_coercibility(Item *a) :Item_int_func(a) {}
longlong val_int();
const char *func_name() const { return "coercibility"; }
- void fix_length_and_dec() { max_length=10; }
+ void fix_length_and_dec() { max_length=10; maybe_null= 0; }
+ table_map not_null_tables() const { return 0; }
};
class Item_func_locate :public Item_int_func
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index ea8a78c528a..eec81f953fb 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -653,7 +653,9 @@ public:
{
collation.set(system_charset_info);
max_length= 64 * collation.collation->mbmaxlen; // should be enough
+ maybe_null= 0;
};
+ table_map not_null_tables() const { return 0; }
};
class Item_func_collation :public Item_str_func
@@ -666,7 +668,9 @@ public:
{
collation.set(system_charset_info);
max_length= 64 * collation.collation->mbmaxlen; // should be enough
+ maybe_null= 0;
};
+ table_map not_null_tables() const { return 0; }
};
class Item_func_crc32 :public Item_int_func
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 27c30cadd11..cf145e33d1e 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1041,25 +1041,28 @@ bool Query_log_event::write(IO_CACHE* file)
}
if (catalog_len) // i.e. "catalog inited" (false for 4.0 events)
{
- *start++= Q_CATALOG_CODE;
+ *start++= Q_CATALOG_NZ_CODE;
*start++= (uchar) catalog_len;
bmove(start, catalog, catalog_len);
start+= catalog_len;
/*
- We write a \0 at the end. As we also have written the length, it's
- apparently useless; but in fact it enables us to just do
- catalog= a_pointer_to_the_buffer_of_the_read_event
- later in the slave SQL thread.
- If we didn't have the \0, we would need to memdup to build the catalog in
- the slave SQL thread.
- And still the interest of having the length too is that in the slave SQL
- thread we immediately know at which position the catalog ends (no need to
- search for '\0'. In other words: length saves search, \0 saves mem alloc,
- at the cost of 1 redundant byte on the disk.
- Note that this is only a fix until we change 'catalog' to LEX_STRING
- (then we won't need the \0).
- */
- *(start++)= '\0';
+ In 5.0.x masters where x<4 we used to store the terminating zero here. This
+ was a waste of one byte, so we don't do it in x>=4 masters. We change the
+ code to Q_CATALOG_NZ_CODE, because re-using the old code would make x<4
+ slaves of this x>=4 master segfault (they expect a zero when there is
+ none). The remaining compatibility problems are: the older slave will not
+ find the catalog, but it will not crash, and it is not an issue
+ that it does not find the catalog, as catalogs were not used in those older
+ MySQL versions (we store the catalog in the binlog and read it from the
+ relay log but do nothing useful with it). What is an issue is that the
+ older slave will stop processing the Q_* blocks (and jump to the db/query)
+ as soon as it sees the unknown Q_CATALOG_NZ_CODE, so it will not be able to
+ read Q_AUTO_INCREMENT*, Q_CHARSET, and replication will fail silently in
+ various ways. It is documented that you should not mix alpha/beta versions
+ if they are not exactly the same version, with the example of 5.0.2<->5.0.3
+ and 5.0.3<->5.0.4. If replication is from older to newer, the newer will
+ not find the catalog and will have the same problems.
+ */
}
if (auto_increment_increment != 1)
{
@@ -1259,10 +1262,10 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
pos+= 8;
break;
}
- case Q_CATALOG_CODE:
+ case Q_CATALOG_NZ_CODE:
if ((catalog_len= *pos))
catalog= (char*) pos+1; // Will be copied later
- pos+= catalog_len+2;
+ pos+= catalog_len+1;
break;
case Q_AUTO_INCREMENT:
auto_increment_increment= uint2korr(pos);
@@ -1297,9 +1300,10 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
DBUG_VOID_RETURN;
if (catalog_len) // If catalog is given
{
- memcpy(start, catalog, catalog_len+1); // Copy name and end \0
+ memcpy(start, catalog, catalog_len);
catalog= start;
- start+= catalog_len+1;
+ start+= catalog_len;
+ *start++= 0;
}
if (time_zone_len)
{
@@ -4006,8 +4010,10 @@ int Create_file_log_event::exec_event(struct st_relay_log_info* rli)
strmov(p, ".info"); // strmov takes less code than memcpy
strnmov(proc_info, "Making temp file ", 17); // no end 0
thd->proc_info= proc_info;
- if ((fd = my_open(fname_buf, O_WRONLY|O_CREAT|O_BINARY|O_TRUNC,
- MYF(MY_WME))) < 0 ||
+ my_delete(fname_buf, MYF(0)); // old copy may exist already
+ if ((fd= my_create(fname_buf, CREATE_MODE,
+ O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
+ MYF(MY_WME))) < 0 ||
init_io_cache(&file, fd, IO_SIZE, WRITE_CACHE, (my_off_t)0, 0,
MYF(MY_WME|MY_NABP)))
{
@@ -4031,8 +4037,10 @@ int Create_file_log_event::exec_event(struct st_relay_log_info* rli)
my_close(fd, MYF(0));
// fname_buf now already has .data, not .info, because we did our trick
- if ((fd = my_open(fname_buf, O_WRONLY|O_CREAT|O_BINARY|O_TRUNC,
- MYF(MY_WME))) < 0)
+ my_delete(fname_buf, MYF(0)); // old copy may exist already
+ if ((fd= my_create(fname_buf, CREATE_MODE,
+ O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
+ MYF(MY_WME))) < 0)
{
slave_print_error(rli,my_errno, "Error in Create_file event: could not open file '%s'", fname_buf);
goto err;
@@ -4148,12 +4156,12 @@ void Append_block_log_event::pack_info(Protocol *protocol)
/*
- Append_block_log_event::get_open_mode()
+ Append_block_log_event::get_create_or_append()
*/
-int Append_block_log_event::get_open_mode() const
+int Append_block_log_event::get_create_or_append() const
{
- return O_WRONLY | O_APPEND | O_BINARY;
+ return 0; /* append to the file, fail if not exists */
}
/*
@@ -4171,7 +4179,20 @@ int Append_block_log_event::exec_event(struct st_relay_log_info* rli)
memcpy(p, ".data", 6);
strnmov(proc_info, "Making temp file ", 17); // no end 0
thd->proc_info= proc_info;
- if ((fd = my_open(fname, get_open_mode(), MYF(MY_WME))) < 0)
+ if (get_create_or_append())
+ {
+ my_delete(fname, MYF(0)); // old copy may exist already
+ if ((fd= my_create(fname, CREATE_MODE,
+ O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
+ MYF(MY_WME))) < 0)
+ {
+ slave_print_error(rli, my_errno,
+ "Error in %s event: could not create file '%s'",
+ get_type_str(), fname);
+ goto err;
+ }
+ }
+ else if ((fd = my_open(fname, O_WRONLY|O_APPEND|O_BINARY|O_NOFOLLOW, MYF(MY_WME))) < 0)
{
slave_print_error(rli, my_errno,
"Error in %s event: could not open file '%s'",
@@ -4384,7 +4405,7 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli)
Load_log_event* lev = 0;
memcpy(p, ".info", 6);
- if ((fd = my_open(fname, O_RDONLY|O_BINARY, MYF(MY_WME))) < 0 ||
+ if ((fd = my_open(fname, O_RDONLY|O_BINARY|O_NOFOLLOW, MYF(MY_WME))) < 0 ||
init_io_cache(&file, fd, IO_SIZE, READ_CACHE, (my_off_t)0, 0,
MYF(MY_WME|MY_NABP)))
{
@@ -4483,9 +4504,9 @@ Begin_load_query_log_event(const char* buf, uint len,
#if defined( HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
-int Begin_load_query_log_event::get_open_mode() const
+int Begin_load_query_log_event::get_create_or_append() const
{
- return O_CREAT | O_WRONLY | O_BINARY | O_TRUNC;
+ return 1; /* create the file */
}
#endif /* defined( HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */
@@ -4662,7 +4683,12 @@ Execute_load_query_log_event::exec_event(struct st_relay_log_info* rli)
/* Forging file name for deletion in same buffer */
*fname_end= 0;
- (void) my_delete(fname, MYF(MY_WME));
+ /*
+ If there was an error the slave is going to stop, leave the
+ file so that we can re-execute this event at START SLAVE.
+ */
+ if (!error)
+ (void) my_delete(fname, MYF(MY_WME));
my_free(buf, MYF(MY_ALLOW_ZERO_PTR));
return error;
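
Besides the catalog change, the log_event.cc hunks harden the slave's handling of temporary files for LOAD DATA events: any stale copy is deleted first, and the file is then created with O_EXCL and O_NOFOLLOW so a pre-planted file or symlink cannot hijack the write. A small POSIX sketch of that pattern (plain open()/unlink() instead of the server's my_create()/my_delete() wrappers, and a hypothetical path):

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

// Remove any stale copy, then create the file exclusively and refuse to
// follow symlinks, so the write cannot be redirected by an attacker.
int create_temp_file_safely(const char *path)
{
  unlink(path);                                   // old copy may exist already
  int flags = O_WRONLY | O_CREAT | O_EXCL;
#ifdef O_NOFOLLOW
  flags |= O_NOFOLLOW;
#endif
  int fd = open(path, flags, 0640);
  if (fd < 0)
    std::perror("open");
  return fd;
}

int main()
{
  int fd = create_temp_file_safely("/tmp/relay_demo.data");
  if (fd >= 0)
  {
    close(fd);
    unlink("/tmp/relay_demo.data");
  }
  return fd >= 0 ? 0 : 1;
}
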
diff --git a/sql/log_event.h b/sql/log_event.h
index 72142db0aa7..ba5d74b1784 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -234,10 +234,22 @@ struct sql_ex_info
/* these are codes, not offsets; not more than 256 values (1 byte). */
#define Q_FLAGS2_CODE 0
#define Q_SQL_MODE_CODE 1
+/*
+ Q_CATALOG_CODE is catalog with end zero stored; it is used only by MySQL
+ 5.0.x where 0<=x<=3.
+*/
#define Q_CATALOG_CODE 2
#define Q_AUTO_INCREMENT 3
#define Q_CHARSET_CODE 4
#define Q_TIME_ZONE_CODE 5
+/*
+ Q_CATALOG_NZ_CODE is catalog withOUT end zero stored; it is used by MySQL
+ 5.0.x where x>=4. Saves one byte in every Query_log_event in binlog,
+ compared to Q_CATALOG_CODE. The reason we didn't simply re-use
+ Q_CATALOG_CODE is that then a 5.0.3 slave of this 5.0.x (x>=4) master would
+ crash (segfault etc) because it would expect a 0 when there is none.
+*/
+#define Q_CATALOG_NZ_CODE 6
/* Intvar event post-header */
@@ -1367,7 +1379,7 @@ public:
#ifdef HAVE_REPLICATION
int exec_event(struct st_relay_log_info* rli);
void pack_info(Protocol* protocol);
- virtual int get_open_mode() const;
+ virtual int get_create_or_append() const;
#endif /* HAVE_REPLICATION */
#else
void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0);
@@ -1475,7 +1487,7 @@ public:
bool using_trans);
#ifdef HAVE_REPLICATION
Begin_load_query_log_event(THD* thd);
- int get_open_mode() const;
+ int get_create_or_append() const;
#endif /* HAVE_REPLICATION */
#endif
Begin_load_query_log_event(const char* buf, uint event_len,
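
The net effect of the Q_CATALOG_NZ_CODE change is a one-byte difference in the status-block layout: the old code stored <code><len><bytes>\0 and the reader skipped len+2 bytes, while the new code stores <code><len><bytes> and skips len+1. A toy parser sketch of reading such a length-prefixed field (the buffer layout is illustrative, not the exact binlog event format):

#include <string>

// Illustrative status-block codes (values match the patch above).
enum { Q_CATALOG_CODE = 2, Q_CATALOG_NZ_CODE = 6 };

// Read one catalog block starting at *pos; returns the catalog and advances pos.
static std::string read_catalog(const unsigned char *&pos)
{
  unsigned char code = *pos++;
  unsigned char len  = *pos++;                 // 1-byte length in both formats
  std::string catalog(reinterpret_cast<const char *>(pos), len);
  pos += len;
  if (code == Q_CATALOG_CODE)                  // old format: skip the redundant '\0'
    ++pos;
  return catalog;
}

int main()
{
  // "std" written in the new, zero-less format.
  const unsigned char buf[] = { Q_CATALOG_NZ_CODE, 3, 's', 't', 'd' };
  const unsigned char *pos = buf;
  return read_catalog(pos) == "std" ? 0 : 1;
}
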
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 1ee53e5ed64..7e4c6675e45 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -1155,6 +1155,7 @@ extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb;
extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
extern SHOW_COMP_OPTION have_crypt;
extern SHOW_COMP_OPTION have_compress;
+extern SHOW_COMP_OPTION have_blackhole_db;
#ifndef __WIN__
extern pthread_t signal_thread;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index ecaa7ace841..95de170b99d 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -420,6 +420,7 @@ SHOW_COMP_OPTION have_federated_db;
SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
SHOW_COMP_OPTION have_geometry, have_rtree_keys;
SHOW_COMP_OPTION have_crypt, have_compress;
+SHOW_COMP_OPTION have_blackhole_db;
/* Thread specific variables */
@@ -3134,8 +3135,17 @@ we force server id to 2, but this MySQL server will not act as a slave.");
#endif
if (opt_bootstrap) /* If running with bootstrap, do not start replication. */
opt_skip_slave_start= 1;
- /* init_slave() must be called after the thread keys are created */
- init_slave();
+ /*
+ init_slave() must be called after the thread keys are created.
+ Some parts of the code (e.g. SHOW STATUS LIKE 'slave_running' and other
+ places) assume that active_mi != 0, so let's fail if it's 0 (out of
+ memory); a message has already been printed.
+ */
+ if (init_slave() && !active_mi)
+ {
+ end_thr_alarm(1); // Don't allow alarms
+ unireg_abort(1);
+ }
if (opt_bootstrap)
{
@@ -3892,10 +3902,19 @@ pthread_handler_decl(handle_connections_shared_memory,arg)
char *suffix_pos;
char connect_number_char[22], *p;
const char *errmsg= 0;
+ SECURITY_ATTRIBUTES *sa_event= 0, *sa_mapping= 0;
my_thread_init();
DBUG_ENTER("handle_connections_shared_memorys");
DBUG_PRINT("general",("Waiting for allocated shared memory."));
+ if (my_security_attr_create(&sa_event, &errmsg,
+ GENERIC_ALL, SYNCHRONIZE | EVENT_MODIFY_STATE))
+ goto error;
+
+ if (my_security_attr_create(&sa_mapping, &errmsg,
+ GENERIC_ALL, FILE_MAP_READ | FILE_MAP_WRITE))
+ goto error;
+
/*
The names of the event and file-mapping objects are created according to the following rule:
shared_memory_base_name+unique_part
@@ -3905,22 +3924,22 @@ pthread_handler_decl(handle_connections_shared_memory,arg)
*/
suffix_pos= strxmov(tmp,shared_memory_base_name,"_",NullS);
strmov(suffix_pos, "CONNECT_REQUEST");
- if ((smem_event_connect_request= CreateEvent(0,FALSE,FALSE,tmp)) == 0)
+ if ((smem_event_connect_request= CreateEvent(sa_event,
+ FALSE, FALSE, tmp)) == 0)
{
errmsg= "Could not create request event";
goto error;
}
strmov(suffix_pos, "CONNECT_ANSWER");
- if ((event_connect_answer= CreateEvent(0,FALSE,FALSE,tmp)) == 0)
+ if ((event_connect_answer= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
{
errmsg="Could not create answer event";
goto error;
}
strmov(suffix_pos, "CONNECT_DATA");
- if ((handle_connect_file_map= CreateFileMapping(INVALID_HANDLE_VALUE,0,
- PAGE_READWRITE,
- 0,sizeof(connect_number),
- tmp)) == 0)
+ if ((handle_connect_file_map=
+ CreateFileMapping(INVALID_HANDLE_VALUE, sa_mapping,
+ PAGE_READWRITE, 0, sizeof(connect_number), tmp)) == 0)
{
errmsg= "Could not create file mapping";
goto error;
@@ -3965,10 +3984,9 @@ pthread_handler_decl(handle_connections_shared_memory,arg)
suffix_pos= strxmov(tmp,shared_memory_base_name,"_",connect_number_char,
"_",NullS);
strmov(suffix_pos, "DATA");
- if ((handle_client_file_map= CreateFileMapping(INVALID_HANDLE_VALUE,0,
- PAGE_READWRITE,0,
- smem_buffer_length,
- tmp)) == 0)
+ if ((handle_client_file_map=
+ CreateFileMapping(INVALID_HANDLE_VALUE, sa_mapping,
+ PAGE_READWRITE, 0, smem_buffer_length, tmp)) == 0)
{
errmsg= "Could not create file mapping";
goto errorconn;
@@ -3981,31 +3999,33 @@ pthread_handler_decl(handle_connections_shared_memory,arg)
goto errorconn;
}
strmov(suffix_pos, "CLIENT_WROTE");
- if ((event_client_wrote= CreateEvent(0, FALSE, FALSE, tmp)) == 0)
+ if ((event_client_wrote= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
{
errmsg= "Could not create client write event";
goto errorconn;
}
strmov(suffix_pos, "CLIENT_READ");
- if ((event_client_read= CreateEvent(0, FALSE, FALSE, tmp)) == 0)
+ if ((event_client_read= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
{
errmsg= "Could not create client read event";
goto errorconn;
}
strmov(suffix_pos, "SERVER_READ");
- if ((event_server_read= CreateEvent(0, FALSE, FALSE, tmp)) == 0)
+ if ((event_server_read= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
{
errmsg= "Could not create server read event";
goto errorconn;
}
strmov(suffix_pos, "SERVER_WROTE");
- if ((event_server_wrote= CreateEvent(0, FALSE, FALSE, tmp)) == 0)
+ if ((event_server_wrote= CreateEvent(sa_event,
+ FALSE, FALSE, tmp)) == 0)
{
errmsg= "Could not create server write event";
goto errorconn;
}
strmov(suffix_pos, "CONNECTION_CLOSED");
- if ((event_conn_closed= CreateEvent(0, TRUE , FALSE, tmp)) == 0)
+ if ((event_conn_closed= CreateEvent(sa_event,
+ TRUE, FALSE, tmp)) == 0)
{
errmsg= "Could not create closed connection event";
goto errorconn;
@@ -4080,6 +4100,8 @@ error:
strxmov(buff, "Can't create shared memory service: ", errmsg, ".", NullS);
sql_perror(buff);
}
+ my_security_attr_free(sa_event);
+ my_security_attr_free(sa_mapping);
if (handle_connect_map) UnmapViewOfFile(handle_connect_map);
if (handle_connect_file_map) CloseHandle(handle_connect_file_map);
if (event_connect_answer) CloseHandle(event_connect_answer);
@@ -5225,7 +5247,7 @@ The minimum value for this variable is 4096.",
"Max number of errors/warnings to store for a statement.",
(gptr*) &global_system_variables.max_error_count,
(gptr*) &max_system_variables.max_error_count,
- 0, GET_ULONG, REQUIRED_ARG, DEFAULT_ERROR_COUNT, 1, 65535, 0, 1, 0},
+ 0, GET_ULONG, REQUIRED_ARG, DEFAULT_ERROR_COUNT, 0, 65535, 0, 1, 0},
{"max_heap_table_size", OPT_MAX_HEP_TABLE_SIZE,
"Don't allow creation of heap tables bigger than this.",
(gptr*) &global_system_variables.max_heap_table_size,
@@ -5689,7 +5711,8 @@ struct show_var_st status_vars[]= {
{"Select_range_check", (char*) offsetof(STATUS_VAR, select_range_check_count), SHOW_LONG_STATUS},
{"Select_scan", (char*) offsetof(STATUS_VAR, select_scan_count), SHOW_LONG_STATUS},
{"Slave_open_temp_tables", (char*) &slave_open_temp_tables, SHOW_LONG},
- {"Slave_running", (char*) 0, SHOW_SLAVE_RUNNING},
+ {"Slave_running", (char*) 0, SHOW_SLAVE_RUNNING},
+ {"Slave_retried_transactions",(char*) 0, SHOW_SLAVE_RETRIED_TRANS},
{"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG},
{"Slow_queries", (char*) offsetof(STATUS_VAR, long_query_count), SHOW_LONG_STATUS},
{"Sort_merge_passes", (char*) offsetof(STATUS_VAR, filesort_merge_passes), SHOW_LONG_STATUS},
@@ -5954,6 +5977,11 @@ static void mysql_init_variables(void)
#else
have_archive_db= SHOW_OPTION_NO;
#endif
+#ifdef HAVE_BLACKHOLE_DB
+ have_blackhole_db= SHOW_OPTION_YES;
+#else
+ have_blackhole_db= SHOW_OPTION_NO;
+#endif
#ifdef HAVE_FEDERATED_DB
have_federated_db= SHOW_OPTION_YES;
#else
diff --git a/sql/set_var.cc b/sql/set_var.cc
index f5df4dfbd2c..457df3f2947 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -767,6 +767,7 @@ struct show_var_st init_vars[]= {
{sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS},
{"have_archive", (char*) &have_archive_db, SHOW_HAVE},
{"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE},
+ {"have_blackhole_engine", (char*) &have_blackhole_db, SHOW_HAVE},
{"have_compress", (char*) &have_compress, SHOW_HAVE},
{"have_crypt", (char*) &have_crypt, SHOW_HAVE},
{"have_csv", (char*) &have_csv_db, SHOW_HAVE},
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 6b254f47284..7249605f10c 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -1774,7 +1774,7 @@ ER_TOO_BIG_FIELDLENGTH 42000 S1009
cze "P-Bli velk dlka sloupce '%-.64s' (nejvce %d). Pouijte BLOB"
dan "For stor feltlngde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet"
nla "Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB"
- eng "Column length too big for column '%-.64s' (max = %d); use BLOB instead"
+ eng "Column length too big for column '%-.64s' (max = %d); use BLOB or TEXT instead"
jps "column '%-.64s' ,mۂ column ̑傫܂. (ő %d ܂). BLOB ɎgpĂ.",
est "Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB vljatpi"
fre "Champ '%-.64s' trop long (max = %d). Utilisez un BLOB"
@@ -1789,7 +1789,7 @@ ER_TOO_BIG_FIELDLENGTH 42000 S1009
pol "Zbyt dua dugo? kolumny '%-.64s' (maks. = %d). W zamian uyj typu BLOB"
por "Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar"
rum "Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine"
- rus " '%-.64s' ( = %d). BLOB "
+ rus " '%-.64s' ( = %d). BLOB TEXT "
serbian "Previe podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje"
slo "Prli vek dka pre pole '%-.64s' (maximum = %d). Pouite BLOB"
spa "Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar"
diff --git a/sql/slave.cc b/sql/slave.cc
index c92350f4a2f..ebf87660a0e 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -27,6 +27,7 @@
#include <my_dir.h>
#include <sql_common.h>
+#define MAX_SLAVE_RETRY_PAUSE 5
bool use_slave_mask = 0;
MY_BITMAP slave_error_mask;
@@ -2528,7 +2529,7 @@ st_relay_log_info::st_relay_log_info()
ignore_log_space_limit(0), last_master_timestamp(0), slave_skip_counter(0),
abort_pos_wait(0), slave_run_id(0), sql_thd(0), last_slave_errno(0),
inited(0), abort_slave(0), slave_running(0), until_condition(UNTIL_NONE),
- until_log_pos(0)
+ until_log_pos(0), retried_trans(0)
{
group_relay_log_name[0]= event_relay_log_name[0]=
group_master_log_name[0]= 0;
@@ -3261,9 +3262,8 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
init_master_info()).
b) init_relay_log_pos(), because the BEGIN may be an older relay log.
*/
- if (rli->trans_retries--)
+ if (rli->trans_retries < slave_trans_retries)
{
- sql_print_information("Slave SQL thread retries transaction");
if (init_master_info(rli->mi, 0, 0, 0, SLAVE_SQL))
sql_print_error("Failed to initialize the master info structure");
else if (init_relay_log_pos(rli,
@@ -3275,8 +3275,16 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
else
{
exec_res= 0;
- sleep(2); // chance for concurrent connection to get more locks
- }
+ /* chance for concurrent connection to get more locks */
+ safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE),
+ (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli);
+ pthread_mutex_lock(&rli->data_lock); // because of SHOW STATUS
+ rli->trans_retries++;
+ rli->retried_trans++;
+ pthread_mutex_unlock(&rli->data_lock);
+ DBUG_PRINT("info", ("Slave retries transaction "
+ "rli->trans_retries: %lu", rli->trans_retries));
+ }
}
else
sql_print_error("Slave SQL thread retried transaction %lu time(s) "
@@ -3285,8 +3293,8 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
slave_trans_retries);
}
if (!((thd->options & OPTION_BEGIN) && opt_using_transactions))
- rli->trans_retries= slave_trans_retries; // restart from fresh
- }
+ rli->trans_retries= 0; // restart from fresh
+ }
return exec_res;
}
else
@@ -3701,7 +3709,7 @@ slave_begin:
pthread_mutex_lock(&rli->log_space_lock);
rli->ignore_log_space_limit= 0;
pthread_mutex_unlock(&rli->log_space_lock);
- rli->trans_retries= slave_trans_retries; // start from "no error"
+ rli->trans_retries= 0; // start from "no error"
if (init_relay_log_pos(rli,
rli->group_relay_log_name,
diff --git a/sql/slave.h b/sql/slave.h
index bc41cd4deca..c41234ab2ed 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -293,7 +293,14 @@ typedef struct st_relay_log_info
} until_log_names_cmp_result;
char cached_charset[6];
- ulong trans_retries;
+ /*
+ trans_retries varies between 0 and slave_transaction_retries and counts how
+ many times the slave has retried the present transaction; it gets reset to 0
+ when the transaction finally succeeds. retried_trans is a cumulative
+ counter: how many times the slave has retried a transaction (any kind) since
+ the slave started.
+ */
+ ulong trans_retries, retried_trans;
st_relay_log_info();
~st_relay_log_info();
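
The slave.cc/slave.h changes turn the retry counter around: trans_retries now counts up from 0, the pause between retries grows with the attempt number but is capped at MAX_SLAVE_RETRY_PAUSE seconds, and retried_trans accumulates the total for SHOW STATUS. A self-contained sketch of that capped backoff loop (the limits and the failing apply_transaction() are made up for the example):

#include <algorithm>
#include <cstdio>

// Stand-in limits; the server reads slave_transaction_retries from a system
// variable and defines MAX_SLAVE_RETRY_PAUSE as in the patch above.
static const unsigned long kMaxRetries   = 10;
static const unsigned long kMaxPauseSecs = 5;

// Hypothetical transaction that fails a few times before succeeding.
static bool apply_transaction(unsigned long attempt) { return attempt >= 3; }

int main()
{
  unsigned long trans_retries = 0;   // resets to 0 once the transaction commits
  unsigned long retried_trans = 0;   // cumulative, reported by SHOW STATUS

  while (!apply_transaction(trans_retries))
  {
    if (trans_retries >= kMaxRetries)
    {
      std::printf("giving up after %lu retries\n", trans_retries);
      return 1;
    }
    unsigned long pause = std::min(trans_retries, kMaxPauseSecs);
    std::printf("retrying, attempt %lu, sleeping %lu s\n", trans_retries, pause);
    // sleep(pause) would go here; omitted so the sketch runs instantly.
    ++trans_retries;
    ++retried_trans;
  }
  trans_retries = 0;                 // "restart from fresh" after success
  std::printf("committed; total retried transactions so far: %lu\n", retried_trans);
  return 0;
}
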
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 0a5939428cd..021d771cef1 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -1097,6 +1097,9 @@ static void acl_insert_db(const char *user, const char *host, const char *db,
/*
Get privilege for a host, user and db combination
+
+ as db_is_pattern changes the semantics of comparison,
+ acl_cache is not used if db_is_pattern is set.
*/
ulong acl_get(const char *host, const char *ip,
@@ -1116,7 +1119,7 @@ ulong acl_get(const char *host, const char *ip,
db=tmp_db;
}
key_length=(uint) (end-key);
- if ((entry=(acl_entry*) acl_cache->search(key,key_length)))
+ if (!db_is_pattern && (entry=(acl_entry*) acl_cache->search(key,key_length)))
{
db_access=entry->access;
VOID(pthread_mutex_unlock(&acl_cache->lock));
@@ -1165,7 +1168,8 @@ ulong acl_get(const char *host, const char *ip,
}
exit:
/* Save entry in cache for quick retrieval */
- if ((entry= (acl_entry*) malloc(sizeof(acl_entry)+key_length)))
+ if (!db_is_pattern &&
+ (entry= (acl_entry*) malloc(sizeof(acl_entry)+key_length)))
{
entry->access=(db_access & host_access);
entry->length=key_length;
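
The acl_get() fix works because the acl_cache key is the exact (host, user, db) string; a lookup with db_is_pattern set uses wildcard matching, so it can neither trust nor populate that cache. A toy cache wrapper showing the same bypass (the map, names, and privilege bits here are hypothetical):

#include <map>
#include <string>

static std::map<std::string, unsigned long> cache;   // keyed by exact db name

static unsigned long slow_lookup(const std::string & /*db*/, bool is_pattern)
{
  // Placeholder for the real scan of the grant tables; the match semantics
  // differ when the db argument is a wildcard pattern.
  return is_pattern ? 0x1UL : 0x3UL;
}

static unsigned long acl_get_sketch(const std::string &db, bool db_is_pattern)
{
  if (!db_is_pattern)
  {
    auto it = cache.find(db);
    if (it != cache.end())
      return it->second;              // fast path only for exact names
  }
  unsigned long access = slow_lookup(db, db_is_pattern);
  if (!db_is_pattern)
    cache[db] = access;               // never cache pattern results
  return access;
}

int main()
{
  unsigned long exact   = acl_get_sketch("test", false);  // may hit the cache
  unsigned long pattern = acl_get_sketch("te%",  true);   // always recomputed
  return exact == pattern ? 1 : 0;
}
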
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index e2f7c9d62c8..a0fed715405 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -81,6 +81,9 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
List<Item> &set_values, READ_INFO &read_info,
String &enclosed, ulong skip_lines,
bool ignore_check_option_errors);
+static bool write_execute_load_query_log_event(THD *thd,
+ bool duplicates, bool ignore,
+ bool transactional_table);
/*
@@ -413,8 +416,14 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
/* If the file was not empty, wrote_create_file is true */
if (lf_info.wrote_create_file)
{
- Delete_file_log_event d(thd, db, transactional_table);
- mysql_bin_log.write(&d);
+ if ((info.copied || info.deleted) && !transactional_table)
+ write_execute_load_query_log_event(thd, handle_duplicates,
+ ignore, transactional_table);
+ else
+ {
+ Delete_file_log_event d(thd, db, transactional_table);
+ mysql_bin_log.write(&d);
+ }
}
}
#endif /*!EMBEDDED_LIBRARY*/
@@ -437,16 +446,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
*/
read_info.end_io_cache();
if (lf_info.wrote_create_file)
- {
- Execute_load_query_log_event e(thd, thd->query, thd->query_length,
- (char*)thd->lex->fname_start - (char*)thd->query,
- (char*)thd->lex->fname_end - (char*)thd->query,
- (handle_duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE :
- (ignore ? LOAD_DUP_IGNORE :
- LOAD_DUP_ERROR),
- transactional_table, FALSE);
- mysql_bin_log.write(&e);
- }
+ write_execute_load_query_log_event(thd, handle_duplicates,
+ ignore, transactional_table);
}
#endif /*!EMBEDDED_LIBRARY*/
if (transactional_table)
@@ -462,6 +463,23 @@ err:
DBUG_RETURN(error);
}
+
+/* Not a very useful function; just to avoid duplication of code */
+static bool write_execute_load_query_log_event(THD *thd,
+ bool duplicates, bool ignore,
+ bool transactional_table)
+{
+ Execute_load_query_log_event
+ e(thd, thd->query, thd->query_length,
+ (char*)thd->lex->fname_start - (char*)thd->query,
+ (char*)thd->lex->fname_end - (char*)thd->query,
+ (duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE :
+ (ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR),
+ transactional_table, FALSE);
+ return mysql_bin_log.write(&e);
+}
+
+
/****************************************************************************
** Read of rows of fixed size + optional garbage + optional newline
****************************************************************************/
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 1cbc52a2a5a..7e2c37f130e 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1970,6 +1970,7 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
{
ulong stmt_id= uint4korr(packet);
ulong flags= (ulong) ((uchar) packet[4]);
+ Cursor *cursor= 0;
/*
Query text for binary log, or empty string if the query is not put into
binary log.
@@ -2007,15 +2008,17 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
statement: we can't open a cursor for it.
*/
flags= 0;
+ my_error(ER_SP_BAD_CURSOR_QUERY, MYF(0));
+ goto err;
}
else
{
DBUG_PRINT("info",("Using READ_ONLY cursor"));
if (!stmt->cursor &&
- !(stmt->cursor= new (&stmt->main_mem_root) Cursor()))
+ !(cursor= stmt->cursor= new (&stmt->main_mem_root) Cursor()))
DBUG_VOID_RETURN;
/* If lex->result is set, mysql_execute_command will use it */
- stmt->lex->result= &stmt->cursor->result;
+ stmt->lex->result= &cursor->result;
}
}
#ifndef EMBEDDED_LIBRARY
@@ -2061,11 +2064,10 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length)
my_pthread_setprio(pthread_self(), WAIT_PRIOR);
thd->protocol= &thd->protocol_simple; // Use normal protocol
- if (flags & (ulong) CURSOR_TYPE_READ_ONLY)
+ if (cursor && cursor->is_open())
{
- if (stmt->cursor->is_open())
- stmt->cursor->init_from_thd(thd);
- stmt->cursor->state= stmt->state;
+ cursor->init_from_thd(thd);
+ cursor->state= stmt->state;
}
else
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 856fb937b6e..461a0f8b9d6 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -8714,8 +8714,9 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
*table= new_table;
table->s= &table->share_not_to_be_used;
table->file->change_table_ptr(table);
- thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ?
- "Copying to tmp table on disk" : save_proc_info);
+ if (save_proc_info)
+ thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ?
+ "Copying to tmp table on disk" : save_proc_info);
DBUG_RETURN(0);
err:
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 353f1fc5157..556d5b3d89b 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -370,7 +370,7 @@ public:
void close();
void set_unit(SELECT_LEX_UNIT *unit_arg) { unit= unit_arg; }
- Cursor() :join(0), unit(0) {}
+ Cursor() :Item_arena(TRUE), join(0), unit(0) {}
~Cursor();
};
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index fadc445e85d..bd0b8926c6a 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -41,6 +41,7 @@ static int
store_create_info(THD *thd, TABLE_LIST *table_list, String *packet);
static int
view_store_create_info(THD *thd, TABLE_LIST *table, String *packet);
+static bool schema_table_store_record(THD *thd, TABLE *table);
/***************************************************************************
@@ -978,7 +979,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet)
packet->append(buff, (uint) (end- buff));
}
- if (share->max_rows)
+ if (share->max_rows && !table_list->schema_table)
{
packet->append(" MAX_ROWS=", 10);
end= longlong10_to_str(share->max_rows, buff, 10);
@@ -1331,6 +1332,19 @@ static bool show_status_array(THD *thd, const char *wild,
pthread_mutex_unlock(&LOCK_active_mi);
break;
}
+ case SHOW_SLAVE_RETRIED_TRANS:
+ {
+ /*
+ TODO: in 5.1 with multimaster, have one such counter per line in SHOW
+ SLAVE STATUS, and have the sum over all lines here.
+ */
+ pthread_mutex_lock(&LOCK_active_mi);
+ pthread_mutex_lock(&active_mi->rli.data_lock);
+ end= int10_to_str(active_mi->rli.retried_trans, buff, 10);
+ pthread_mutex_unlock(&active_mi->rli.data_lock);
+ pthread_mutex_unlock(&LOCK_active_mi);
+ break;
+ }
#endif /* HAVE_REPLICATION */
case SHOW_OPENTABLES:
end= int10_to_str((long) cached_tables(), buff, 10);
@@ -1537,7 +1551,8 @@ static bool show_status_array(THD *thd, const char *wild,
table->field[0]->store(name_buffer, strlen(name_buffer),
system_charset_info);
table->field[1]->store(pos, (uint32) (end - pos), system_charset_info);
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(TRUE);
}
}
}
@@ -1593,6 +1608,33 @@ typedef struct st_index_field_values
} INDEX_FIELD_VALUES;
+/*
+ Store record to I_S table, convert HEAP table
+ to MyISAM if necessary
+
+ SYNOPSIS
+ schema_table_store_record()
+ thd thread handler
+ table I_S table
+ RETURN
+ 1 error
+ 0 success
+*/
+
+static bool schema_table_store_record(THD *thd, TABLE *table)
+{
+ int error;
+ if ((error= table->file->write_row(table->record[0])))
+ {
+ if (create_myisam_from_heap(thd, table,
+ table->pos_in_table_list->schema_table_param,
+ error, 0))
+ return 1;
+ }
+ return 0;
+}
+
+
void get_index_field_values(LEX *lex, INDEX_FIELD_VALUES *index_field_values)
{
const char *wild= lex->wild ? lex->wild->ptr() : NullS;
@@ -1916,7 +1958,8 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
}
}
}
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(error);
}
else
{
@@ -1953,13 +1996,13 @@ err:
}
-void store_schema_shemata(TABLE *table, const char *db_name,
+bool store_schema_shemata(THD* thd, TABLE *table, const char *db_name,
const char* cs_name)
{
restore_record(table, s->default_values);
table->field[1]->store(db_name, strlen(db_name), system_charset_info);
table->field[2]->store(cs_name, strlen(cs_name), system_charset_info);
- table->file->write_row(table->record[0]);
+ return schema_table_store_record(thd, table);
}
@@ -1988,7 +2031,9 @@ int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond)
{
if (with_i_schema) // information schema name is always first in list
{
- store_schema_shemata(table, file_name, system_charset_info->csname);
+ if (store_schema_shemata(thd, table, file_name,
+ system_charset_info->csname))
+ DBUG_RETURN(1);
with_i_schema= 0;
continue;
}
@@ -2011,8 +2056,9 @@ int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond)
path[length-1]= FN_LIBCHAR;
strmov(path+length, MY_DB_OPT_FILE);
load_db_opt(thd, path, &create);
- store_schema_shemata(table, file_name,
- create.default_table_charset->csname);
+ if (store_schema_shemata(thd, table, file_name,
+ create.default_table_charset->csname))
+ DBUG_RETURN(1);
}
}
DBUG_RETURN(0);
@@ -2197,8 +2243,7 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables,
}
}
}
- table->file->write_row(table->record[0]);
- DBUG_RETURN(0);
+ DBUG_RETURN(schema_table_store_record(thd, table));
}
@@ -2376,7 +2421,8 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables,
#endif
table->field[17]->store(tmp+1,end == tmp ? 0 : (uint) (end-tmp-1), cs);
table->field[18]->store(field->comment.str, field->comment.length, cs);
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
}
}
DBUG_RETURN(0);
@@ -2405,7 +2451,8 @@ int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond)
strlen(tmp_cs->comment ? tmp_cs->comment : ""),
scs);
table->field[3]->store((longlong) tmp_cs->mbmaxlen);
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ return 1;
}
}
return 0;
@@ -2444,7 +2491,8 @@ int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond)
tmp_buff= (tmp_cl->state & MY_CS_COMPILED)? "Yes" : "";
table->field[4]->store(tmp_buff, strlen(tmp_buff), scs);
table->field[5]->store((longlong) tmp_cl->strxfrm_multiply);
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ return 1;
}
}
}
@@ -2473,14 +2521,15 @@ int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond)
restore_record(table, s->default_values);
table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs);
table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs);
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ return 1;
}
}
return 0;
}
-void store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
+bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
const char *wild, bool full_access, const char *sp_user)
{
String tmp_string;
@@ -2494,7 +2543,7 @@ void store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
if (!full_access)
full_access= !strcmp(sp_user, definer);
if (!full_access && check_some_routine_access(thd, sp_db, sp_name))
- return;
+ return 0;
if (lex->orig_sql_command == SQLCOM_SHOW_STATUS_PROC &&
proc_table->field[2]->val_int() == TYPE_ENUM_PROCEDURE ||
@@ -2543,9 +2592,10 @@ void store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
get_field(thd->mem_root, proc_table->field[15], &tmp_string);
table->field[18]->store(tmp_string.ptr(), tmp_string.length(), cs);
table->field[19]->store(definer, strlen(definer), cs);
- table->file->write_row(table->record[0]);
+ return schema_table_store_record(thd, table);
}
}
+ return 0;
}
@@ -2578,9 +2628,19 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond)
res= (res == HA_ERR_END_OF_FILE) ? 0 : 1;
goto err;
}
- store_schema_proc(thd, table, proc_table, wild, full_access, definer);
+ if (store_schema_proc(thd, table, proc_table, wild, full_access, definer))
+ {
+ res= 1;
+ goto err;
+ }
while (!proc_table->file->index_next(proc_table->record[0]))
- store_schema_proc(thd, table, proc_table, wild, full_access, definer);
+ {
+ if (store_schema_proc(thd, table, proc_table, wild, full_access, definer))
+ {
+ res= 1;
+ goto err;
+ }
+ }
err:
proc_table->file->ha_index_end();
@@ -2670,7 +2730,8 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
else
table->field[14]->store("", 0, cs);
table->field[14]->set_notnull();
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
}
}
}
@@ -2709,7 +2770,7 @@ static int get_schema_views_record(THD *thd, struct st_table_list *tables,
table->field[5]->store("YES", 3, cs);
else
table->field[5]->store("NO", 2, cs);
- table->file->write_row(table->record[0]);
+ DBUG_RETURN(schema_table_store_record(thd, table));
}
}
else
@@ -2723,9 +2784,9 @@ static int get_schema_views_record(THD *thd, struct st_table_list *tables,
}
-void store_constraints(TABLE *table, const char*db, const char *tname,
- const char *key_name, uint key_len,
- const char *con_type, uint con_len)
+bool store_constraints(THD *thd, TABLE *table, const char *db,
+ const char *tname, const char *key_name,
+ uint key_len, const char *con_type, uint con_len)
{
CHARSET_INFO *cs= system_charset_info;
restore_record(table, s->default_values);
@@ -2734,7 +2795,7 @@ void store_constraints(TABLE *table, const char*db, const char *tname,
table->field[3]->store(db, strlen(db), cs);
table->field[4]->store(tname, strlen(tname), cs);
table->field[5]->store(con_type, con_len, cs);
- table->file->write_row(table->record[0]);
+ return schema_table_store_record(thd, table);
}
@@ -2767,11 +2828,17 @@ static int get_schema_constraints_record(THD *thd, struct st_table_list *tables,
continue;
if (i == primary_key && !strcmp(key_info->name, primary_key_name))
- store_constraints(table, base_name, file_name, key_info->name,
- strlen(key_info->name), "PRIMARY KEY", 11);
+ {
+ if (store_constraints(thd, table, base_name, file_name, key_info->name,
+ strlen(key_info->name), "PRIMARY KEY", 11))
+ DBUG_RETURN(1);
+ }
else if (key_info->flags & HA_NOSAME)
- store_constraints(table, base_name, file_name, key_info->name,
- strlen(key_info->name), "UNIQUE", 6);
+ {
+ if (store_constraints(thd, table, base_name, file_name, key_info->name,
+ strlen(key_info->name), "UNIQUE", 6))
+ DBUG_RETURN(1);
+ }
}
show_table->file->get_foreign_key_list(thd, &f_key_list);
@@ -2779,8 +2846,11 @@ static int get_schema_constraints_record(THD *thd, struct st_table_list *tables,
List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list);
while ((f_key_info=it++))
{
- store_constraints(table, base_name, file_name, f_key_info->forein_id->str,
- strlen(f_key_info->forein_id->str), "FOREIGN KEY", 11);
+ if (store_constraints(thd, table, base_name, file_name,
+ f_key_info->forein_id->str,
+ strlen(f_key_info->forein_id->str),
+ "FOREIGN KEY", 11))
+ DBUG_RETURN(1);
}
}
DBUG_RETURN(res);
@@ -2843,7 +2913,8 @@ static int get_schema_key_column_usage_record(THD *thd,
key_part->field->field_name,
strlen(key_part->field->field_name),
(longlong) f_idx);
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
}
}
}
@@ -2869,7 +2940,8 @@ static int get_schema_key_column_usage_record(THD *thd,
(longlong) f_idx);
table->field[8]->store((longlong) f_idx);
table->field[8]->set_notnull();
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
}
}
}
@@ -2894,7 +2966,8 @@ int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond)
table->field[1]->store(open_list->table, strlen(open_list->table), cs);
table->field[2]->store((longlong) open_list->in_use);
table->field[3]->store((longlong) open_list->locked);
- table->file->write_row(table->record[0]);
+ if (schema_table_store_record(thd, table))
+ DBUG_RETURN(1);
}
DBUG_RETURN(0);
}
@@ -3035,6 +3108,7 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
TMP_TABLE_ALL_COLUMNS),
HA_POS_ERROR, table_list->alias)))
DBUG_RETURN(0);
+ table_list->schema_table_param= tmp_table_param;
DBUG_RETURN(table);
}
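
The recurring sql_show.cc pattern above is the new schema_table_store_record() helper: every INFORMATION_SCHEMA row write is checked, and if the in-memory HEAP tmp table cannot take the row it is converted to MyISAM via create_myisam_from_heap() before the row is kept. A toy version of that fallback (the in-memory and on-disk containers here are simple stand-ins):

#include <cstdio>
#include <deque>
#include <vector>

// Toy stand-ins: "heap" has a hard capacity, "disk" does not.
struct HeapTable { std::vector<int> rows; enum { kCapacity = 4 }; };
struct DiskTable { std::deque<int>  rows; };

struct SchemaTable
{
  HeapTable heap;
  DiskTable disk;
  bool on_disk = false;

  // Mirrors schema_table_store_record(): try the fast path, convert on failure.
  bool store_record(int row)
  {
    if (!on_disk)
    {
      if (heap.rows.size() < HeapTable::kCapacity)
      {
        heap.rows.push_back(row);
        return false;                          // success
      }
      // "Convert" to the disk-backed table, carrying the existing rows along.
      disk.rows.assign(heap.rows.begin(), heap.rows.end());
      on_disk = true;
    }
    disk.rows.push_back(row);
    return false;
  }
};

int main()
{
  SchemaTable t;
  for (int i = 0; i < 10; i++)
    if (t.store_record(i))
      return 1;                                // would map to DBUG_RETURN(1)
  std::printf("stored %zu rows, on_disk=%d\n", t.disk.rows.size(), (int) t.on_disk);
  return 0;
}
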
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 13c7f0a40b7..e366668659b 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -418,7 +418,7 @@ bool st_select_lex_unit::exec()
}
/* re-enabling indexes for next subselect iteration */
if (union_distinct && table->file->enable_indexes(HA_KEY_SWITCH_ALL))
- DBUG_ASSERT(TRUE);
+ DBUG_ASSERT(0);
}
for (SELECT_LEX *sl= select_cursor; sl; sl= sl->next_select())
{
diff --git a/sql/structs.h b/sql/structs.h
index cbc7161ee20..7a70bfc0f4f 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -182,7 +182,7 @@ enum SHOW_TYPE
SHOW_SSL_CTX_SESS_TIMEOUTS, SHOW_SSL_CTX_SESS_CACHE_FULL,
SHOW_SSL_GET_CIPHER_LIST,
#endif /* HAVE_OPENSSL */
- SHOW_RPL_STATUS, SHOW_SLAVE_RUNNING,
+ SHOW_RPL_STATUS, SHOW_SLAVE_RUNNING, SHOW_SLAVE_RETRIED_TRANS,
SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_CONST_LONG,
SHOW_LONG_STATUS, SHOW_LONG_CONST_STATUS
};
diff --git a/sql/table.h b/sql/table.h
index 4312e09cfe3..fd299759678 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -322,6 +322,7 @@ typedef struct st_schema_table
struct st_lex;
class select_union;
+class TMP_TABLE_PARAM;
struct Field_translator
{
@@ -370,6 +371,7 @@ typedef struct st_table_list
ST_SCHEMA_TABLE *schema_table; /* Information_schema table */
st_select_lex *schema_select_lex;
bool schema_table_reformed;
+ TMP_TABLE_PARAM *schema_table_param;
/* link to select_lex where this table was used */
st_select_lex *select_lex;
st_lex *view; /* link on VIEW lex for merging */