Diffstat (limited to 'sql')
-rw-r--r--  sql/authors.h | 36
-rw-r--r--  sql/event_data_objects.cc | 2
-rw-r--r--  sql/event_db_repository.cc | 16
-rw-r--r--  sql/field.cc | 151
-rw-r--r--  sql/field.h | 114
-rw-r--r--  sql/filesort.cc | 8
-rw-r--r--  sql/ha_partition.cc | 85
-rw-r--r--  sql/ha_partition.h | 1
-rw-r--r--  sql/handler.cc | 148
-rw-r--r--  sql/handler.h | 177
-rw-r--r--  sql/item.cc | 30
-rw-r--r--  sql/item.h | 115
-rw-r--r--  sql/item_func.cc | 20
-rw-r--r--  sql/item_func.h | 75
-rw-r--r--  sql/item_row.h | 3
-rw-r--r--  sql/item_strfunc.h | 17
-rw-r--r--  sql/item_subselect.cc | 26
-rw-r--r--  sql/item_subselect.h | 4
-rw-r--r--  sql/item_sum.h | 12
-rw-r--r--  sql/item_timefunc.h | 37
-rw-r--r--  sql/item_xmlfunc.cc | 9
-rw-r--r--  sql/item_xmlfunc.h | 4
-rw-r--r--  sql/lex.h | 14
-rw-r--r--  sql/log.cc | 66
-rw-r--r--  sql/log.h | 5
-rw-r--r--  sql/log_event.cc | 138
-rw-r--r--  sql/log_event.h | 18
-rw-r--r--  sql/log_event_old.cc | 49
-rw-r--r--  sql/mysql_priv.h | 34
-rw-r--r--  sql/mysqld.cc | 45
-rw-r--r--  sql/opt_range.cc | 87
-rw-r--r--  sql/opt_range.h | 2
-rw-r--r--  sql/opt_sum.cc | 38
-rw-r--r--  sql/procedure.h | 6
-rw-r--r--  sql/records.cc | 15
-rw-r--r--  sql/rpl_filter.cc | 9
-rw-r--r--  sql/rpl_filter.h | 5
-rw-r--r--  sql/set_var.cc | 3
-rw-r--r--  sql/share/errmsg.txt | 27
-rw-r--r--  sql/sp.cc | 19
-rw-r--r--  sql/sp_head.cc | 5
-rw-r--r--  sql/sql_acl.cc | 69
-rw-r--r--  sql/sql_base.cc | 155
-rw-r--r--  sql/sql_class.cc | 137
-rw-r--r--  sql/sql_class.h | 36
-rw-r--r--  sql/sql_connect.cc | 481
-rw-r--r--  sql/sql_cursor.cc | 2
-rw-r--r--  sql/sql_handler.cc | 20
-rw-r--r--  sql/sql_help.cc | 12
-rw-r--r--  sql/sql_insert.cc | 12
-rw-r--r--  sql/sql_lex.cc | 1
-rw-r--r--  sql/sql_lex.h | 11
-rw-r--r--  sql/sql_parse.cc | 143
-rw-r--r--  sql/sql_partition.cc | 3
-rw-r--r--  sql/sql_plugin.cc | 8
-rw-r--r--  sql/sql_prepare.cc | 15
-rw-r--r--  sql/sql_select.cc | 127
-rw-r--r--  sql/sql_servers.cc | 24
-rw-r--r--  sql/sql_show.cc | 430
-rw-r--r--  sql/sql_string.cc | 4
-rw-r--r--  sql/sql_string.h | 8
-rw-r--r--  sql/sql_table.cc | 73
-rw-r--r--  sql/sql_udf.cc | 8
-rw-r--r--  sql/sql_update.cc | 12
-rw-r--r--  sql/sql_yacc.yy | 164
-rw-r--r--  sql/structs.h | 108
-rw-r--r--  sql/table.cc | 590
-rw-r--r--  sql/table.h | 13
-rw-r--r--  sql/thr_malloc.cc | 2
-rw-r--r--  sql/tztime.cc | 28
-rw-r--r--  sql/unireg.cc | 82
-rw-r--r--  sql/unireg.h | 7
72 files changed, 3913 insertions, 547 deletions
diff --git a/sql/authors.h b/sql/authors.h
index dfe3b143e2f..4a321bebb7d 100644
--- a/sql/authors.h
+++ b/sql/authors.h
@@ -34,23 +34,35 @@ struct show_table_authors_st {
*/
struct show_table_authors_st show_table_authors[]= {
+ { "Michael (Monty) Widenius", "Tusby, Finland",
+ "Lead developer and main author" },
+ { "David Axmark", "London, England",
+ "MySQL founder; Small stuff long time ago, Monty ripped it out!" },
+ { "Sergei Golubchik", "Kerpen, Germany",
+ "Full-text search, precision math" },
+ { "Igor Babaev", "Bellevue, USA", "Optimizer, keycache, core work"},
+ { "Sergey Petrunia", "St. Petersburg, Russia", "Optimizer"},
+ { "Oleksandr Byelkin", "Lugansk, Ukraine",
+ "Query Cache (4.0), Subqueries (4.1), Views (5.0)" },
{ "Brian (Krow) Aker", "Seattle, WA, USA",
"Architecture, archive, federated, bunch of little stuff :)" },
- { "Venu Anuganti", "", "Client/server protocol (4.1)" },
- { "David Axmark", "Uppsala, Sweden",
- "Small stuff long time ago, Monty ripped it out!" },
+ { "Kristian Nielsen", "Copenhagen, Denmark",
+ "General build stuff," },
{ "Alexander (Bar) Barkov", "Izhevsk, Russia",
"Unicode and character sets (4.1)" },
+ { "Guilhem Bichot", "Bordeaux, France", "Replication (since 4.0)" },
+ { "Venu Anuganti", "", "Client/server protocol (4.1)" },
+ { "Konstantin Osipov", "Moscow, Russia",
+ "Prepared statements (4.1), Cursors (5.0)" },
+ { "Dmitri Lenev", "Moscow, Russia",
+ "Time zones support (4.1), Triggers (5.0)" },
{ "Omer BarNir", "Sunnyvale, CA, USA",
"Testing (sometimes) and general QA stuff" },
- { "Guilhem Bichot", "Bordeaux, France", "Replication (since 4.0)" },
{ "John Birrell", "", "Emulation of pthread_mutex() for OS/2" },
{ "Andreas F. Bobak", "", "AGGREGATE extension to user-defined functions" },
{ "Alexey Botchkov (Holyfoot)", "Izhevsk, Russia",
"GIS extensions (4.1), embedded server (4.1), precision math (5.0)"},
{ "Reggie Burnett", "Nashville, TN, USA", "Windows development, Connectors" },
- { "Oleksandr Byelkin", "Lugansk, Ukraine",
- "Query Cache (4.0), Subqueries (4.1), Views (5.0)" },
{ "Kent Boortz", "Orebro, Sweden", "Test platform, and general build stuff" },
{ "Tim Bunce", "", "mysqlhotcopy" },
{ "Yves Carlier", "", "mysqlaccess" },
@@ -67,8 +79,6 @@ struct show_table_authors_st show_table_authors[]= {
{ "Yuri Dario", "", "OS/2 port" },
{ "Andrei Elkin", "Espoo, Finland", "Replication" },
{ "Patrick Galbraith", "Sharon, NH", "Federated Engine, mysqlslap" },
- { "Sergei Golubchik", "Kerpen, Germany",
- "Full-text search, precision math" },
{ "Lenz Grimmer", "Hamburg, Germany",
"Production (build and release) engineering" },
{ "Nikolay Grishakin", "Austin, TX, USA", "Testing - Server" },
@@ -83,8 +93,6 @@ struct show_table_authors_st show_table_authors[]= {
{ "Hakan Küçükyılmaz", "Walldorf, Germany", "Testing - Server" },
{ "Greg (Groggy) Lehey", "Uchunga, SA, Australia", "Backup" },
{ "Matthias Leich", "Berlin, Germany", "Testing - Server" },
- { "Dmitri Lenev", "Moscow, Russia",
- "Time zones support (4.1), Triggers (5.0)" },
{ "Arjen Lentz", "Brisbane, Australia",
"Documentation (2001-2004), Dutch error messages, LOG2()" },
{ "Marc Liyanage", "", "Created Mac OS X packages" },
@@ -96,8 +104,6 @@ struct show_table_authors_st show_table_authors[]= {
{ "Jonathan (Jeb) Miller", "Kyle, TX, USA",
"Testing - Cluster, Replication" },
{ "Elliot Murphy", "Cocoa, FL, USA", "Replication and backup" },
- { "Kristian Nielsen", "Copenhagen, Denmark",
- "General build stuff" },
{ "Pekka Nouisiainen", "Stockholm, Sweden",
"NDB Cluster: BLOB support, character set support, ordered indexes" },
{ "Alexander Nozdrin", "Moscow, Russia",
@@ -105,8 +111,6 @@ struct show_table_authors_st show_table_authors[]= {
{ "Per Eric Olsson", "", "Testing of dynamic record format" },
{ "Jonas Oreland", "Stockholm, Sweden",
"NDB Cluster, Online Backup, lots of other things" },
- { "Konstantin Osipov", "Moscow, Russia",
- "Prepared statements (4.1), Cursors (5.0)" },
{ "Alexander (Sasha) Pachev", "Provo, UT, USA",
"Statement-based replication, SHOW CREATE TABLE, mysql-bench" },
{ "Irena Pancirov", "", "Port to Windows with Borland compiler" },
@@ -144,9 +148,9 @@ struct show_table_authors_st show_table_authors[]= {
{ "Sergey Vojtovich", "Izhevsk, Russia", "Plugins infrastructure (5.1)" },
{ "Matt Wagner", "Northfield, MN, USA", "Bug fixing" },
{ "Jim Winstead Jr.", "Los Angeles, CA, USA", "Bug fixing" },
- { "Michael (Monty) Widenius", "Tusby, Finland",
- "Lead developer and main author" },
{ "Peter Zaitsev", "Tacoma, WA, USA",
"SHA1(), AES_ENCRYPT(), AES_DECRYPT(), bug fixing" },
+ {"Mark Mark Callaghan", "Texas, USA", "Statistics patches"},
+ {"Percona", "CA, USA", "Microslow patches"},
{NULL, NULL, NULL}
};
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index 92427c0ba41..ea444b83f63 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -1366,7 +1366,7 @@ Event_job_data::execute(THD *thd, bool drop)
DBUG_ENTER("Event_job_data::execute");
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, 0);
/*
MySQL parser currently assumes that current database is either
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index 8faab5023da..98562d36325 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -404,17 +404,18 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table,
}
key_copy(key_buf, event_table->record[0], key_info, key_len);
- if (!(ret= event_table->file->index_read_map(event_table->record[0], key_buf,
- (key_part_map)1,
- HA_READ_PREFIX)))
+ if (!(ret= event_table->file->ha_index_read_map(event_table->record[0],
+ key_buf,
+ (key_part_map)1,
+ HA_READ_PREFIX)))
{
DBUG_PRINT("info",("Found rows. Let's retrieve them. ret=%d", ret));
do
{
ret= copy_event_to_schema_table(thd, schema_table, event_table);
if (ret == 0)
- ret= event_table->file->index_next_same(event_table->record[0],
- key_buf, key_len);
+ ret= event_table->file->ha_index_next_same(event_table->record[0],
+ key_buf, key_len);
} while (ret == 0);
}
DBUG_PRINT("info", ("Scan finished. ret=%d", ret));
@@ -883,8 +884,9 @@ Event_db_repository::find_named_event(LEX_STRING db, LEX_STRING name,
key_copy(key, table->record[0], table->key_info, table->key_info->key_length);
- if (table->file->index_read_idx_map(table->record[0], 0, key, HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_idx_map(table->record[0], 0, key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{
DBUG_PRINT("info", ("Row not found"));
DBUG_RETURN(TRUE);
diff --git a/sql/field.cc b/sql/field.cc
index 69fe192c1d1..b10f9f88022 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -57,7 +57,7 @@ const char field_separator=',';
((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1)))
#define ASSERT_COLUMN_MARKED_FOR_READ DBUG_ASSERT(!table || (!table->read_set || bitmap_is_set(table->read_set, field_index)))
-#define ASSERT_COLUMN_MARKED_FOR_WRITE DBUG_ASSERT(!table || (!table->write_set || bitmap_is_set(table->write_set, field_index)))
+#define ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED DBUG_ASSERT(!table || (!table->write_set || bitmap_is_set(table->write_set, field_index) || bitmap_is_set(&table->vcol_set, field_index)))
/*
Rules for merging different types of fields in UNION
@@ -1312,7 +1312,8 @@ Field::Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
key_start(0), part_of_key(0), part_of_key_not_clustered(0),
part_of_sortkey(0), unireg_check(unireg_check_arg),
field_length(length_arg), null_bit(null_bit_arg),
- is_created_from_null_item(FALSE)
+ is_created_from_null_item(FALSE),
+ vcol_info(0), stored_in_db(TRUE)
{
flags=null_ptr ? 0: NOT_NULL_FLAG;
comment.str= (char*) "";
@@ -1611,7 +1612,7 @@ longlong Field::convert_decimal2longlong(const my_decimal *val,
int Field_num::store_decimal(const my_decimal *val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int err= 0;
longlong i= convert_decimal2longlong(val, unsigned_flag, &err);
return test(err | store(i, unsigned_flag));
@@ -1683,7 +1684,7 @@ void Field_num::make_field(Send_field *field)
int Field_str::store_decimal(const my_decimal *d)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
double val;
/* TODO: use decimal2string? */
int err= warn_if_overflow(my_decimal2double(E_DEC_FATAL_ERROR &
@@ -1760,7 +1761,7 @@ bool Field::get_time(MYSQL_TIME *ltime)
int Field::store_time(MYSQL_TIME *ltime, timestamp_type type_arg)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
char buff[MAX_DATE_STRING_REP_LENGTH];
uint length= (uint) my_TIME_to_str(ltime, buff);
return store(buff, length, &my_charset_bin);
@@ -1887,7 +1888,7 @@ void Field_decimal::overflow(bool negative)
int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp(buff,sizeof(buff), &my_charset_bin);
const uchar *from= (uchar*) from_arg;
@@ -2253,7 +2254,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
int Field_decimal::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
if (unsigned_flag && nr < 0)
{
overflow(1);
@@ -2298,7 +2299,7 @@ int Field_decimal::store(double nr)
int Field_decimal::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
char buff[22];
uint length, int_part;
char fyllchar;
@@ -2534,7 +2535,7 @@ void Field_new_decimal::set_value_on_overflow(my_decimal *decimal_value,
bool Field_new_decimal::store_value(const my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
DBUG_ENTER("Field_new_decimal::store_value");
#ifndef DBUG_OFF
@@ -2579,7 +2580,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
int Field_new_decimal::store(const char *from, uint length,
CHARSET_INFO *charset_arg)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int err;
my_decimal decimal_value;
DBUG_ENTER("Field_new_decimal::store(char*)");
@@ -2646,7 +2647,7 @@ int Field_new_decimal::store(const char *from, uint length,
int Field_new_decimal::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
my_decimal decimal_value;
int err;
DBUG_ENTER("Field_new_decimal::store(double)");
@@ -2681,7 +2682,7 @@ int Field_new_decimal::store(double nr)
int Field_new_decimal::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
my_decimal decimal_value;
int err;
@@ -2703,7 +2704,7 @@ int Field_new_decimal::store(longlong nr, bool unsigned_val)
int Field_new_decimal::store_decimal(const my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
return store_value(decimal_value);
}
@@ -2924,7 +2925,7 @@ Field_new_decimal::unpack(uchar* to,
int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error;
longlong rnd;
@@ -2936,7 +2937,7 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_tiny::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
nr=rint(nr);
if (unsigned_flag)
@@ -2979,7 +2980,7 @@ int Field_tiny::store(double nr)
int Field_tiny::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
if (unsigned_flag)
@@ -3099,7 +3100,7 @@ void Field_tiny::sql_type(String &res) const
int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int store_tmp;
int error;
longlong rnd;
@@ -3120,7 +3121,7 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_short::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
int16 res;
nr=rint(nr);
@@ -3172,7 +3173,7 @@ int Field_short::store(double nr)
int Field_short::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
int16 res;
@@ -3346,7 +3347,7 @@ void Field_short::sql_type(String &res) const
int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int store_tmp;
int error;
longlong rnd;
@@ -3360,7 +3361,7 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_medium::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
nr=rint(nr);
if (unsigned_flag)
@@ -3406,7 +3407,7 @@ int Field_medium::store(double nr)
int Field_medium::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
if (unsigned_flag)
@@ -3536,7 +3537,7 @@ void Field_medium::sql_type(String &res) const
int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
long store_tmp;
int error;
longlong rnd;
@@ -3557,7 +3558,7 @@ int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_long::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
int32 res;
nr=rint(nr);
@@ -3609,7 +3610,7 @@ int Field_long::store(double nr)
int Field_long::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
int32 res;
@@ -3783,7 +3784,7 @@ void Field_long::sql_type(String &res) const
int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
char *end;
ulonglong tmp;
@@ -3813,7 +3814,7 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_longlong::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
longlong res;
@@ -3865,7 +3866,7 @@ int Field_longlong::store(double nr)
int Field_longlong::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
if (nr < 0) // Only possible error
@@ -4091,7 +4092,7 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_float::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= truncate(&nr, FLT_MAX);
float j= (float)nr;
@@ -4353,7 +4354,7 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs)
int Field_double::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= truncate(&nr, DBL_MAX);
#ifdef WORDS_BIGENDIAN
@@ -4780,7 +4781,7 @@ timestamp_auto_set_type Field_timestamp::get_auto_set_type() const
int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
MYSQL_TIME l_time;
my_time_t tmp= 0;
int error;
@@ -4843,7 +4844,7 @@ int Field_timestamp::store(double nr)
int Field_timestamp::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
MYSQL_TIME l_time;
my_time_t timestamp= 0;
int error;
@@ -5142,7 +5143,7 @@ int Field_time::store_time(MYSQL_TIME *ltime, timestamp_type time_type)
int Field_time::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
long tmp;
int error= 0;
if (nr > (double)TIME_MAX_VALUE)
@@ -5180,7 +5181,7 @@ int Field_time::store(double nr)
int Field_time::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
long tmp;
int error= 0;
if (nr < (longlong) -TIME_MAX_VALUE && !unsigned_val)
@@ -5351,7 +5352,7 @@ void Field_time::sql_type(String &res) const
int Field_year::store(const char *from, uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
char *end;
int error;
longlong nr= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error);
@@ -5399,7 +5400,7 @@ int Field_year::store(double nr)
int Field_year::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
if (nr < 0 || (nr >= 100 && nr <= 1900) || nr > 2155)
{
*ptr= 0;
@@ -5472,7 +5473,7 @@ void Field_year::sql_type(String &res) const
int Field_date::store(const char *from, uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
MYSQL_TIME l_time;
uint32 tmp;
int error;
@@ -5527,7 +5528,7 @@ int Field_date::store(double nr)
int Field_date::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
MYSQL_TIME not_used;
int error;
longlong initial_nr= nr;
@@ -5706,7 +5707,7 @@ void Field_date::sql_type(String &res) const
int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
long tmp;
MYSQL_TIME l_time;
int error;
@@ -5756,7 +5757,7 @@ int Field_newdate::store(double nr)
int Field_newdate::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
MYSQL_TIME l_time;
longlong tmp;
int error;
@@ -5792,7 +5793,7 @@ int Field_newdate::store(longlong nr, bool unsigned_val)
int Field_newdate::store_time(MYSQL_TIME *ltime,timestamp_type time_type)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
long tmp;
int error= 0;
if (time_type == MYSQL_TIMESTAMP_DATE ||
@@ -5939,7 +5940,7 @@ void Field_newdate::sql_type(String &res) const
int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
MYSQL_TIME time_tmp;
int error;
ulonglong tmp= 0;
@@ -5992,7 +5993,7 @@ int Field_datetime::store(double nr)
int Field_datetime::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
MYSQL_TIME not_used;
int error;
longlong initial_nr= nr;
@@ -6030,7 +6031,7 @@ int Field_datetime::store(longlong nr, bool unsigned_val)
int Field_datetime::store_time(MYSQL_TIME *ltime,timestamp_type time_type)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
longlong tmp;
int error= 0;
/*
@@ -6333,7 +6334,7 @@ Field_longstr::report_if_important_data(const char *ptr, const char *end,
int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
uint copy_length;
const char *well_formed_error_pos;
const char *cannot_convert_error_pos;
@@ -6374,7 +6375,7 @@ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs)
int Field_str::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
uint length;
uint local_char_length= field_length / charset()->mbmaxlen;
@@ -6978,7 +6979,7 @@ int Field_varstring::do_save_field_metadata(uchar *metadata_ptr)
int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
uint copy_length;
const char *well_formed_error_pos;
const char *cannot_convert_error_pos;
@@ -7642,7 +7643,7 @@ void Field_blob::put_length(uchar *pos, uint32 length)
int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
uint copy_length, new_length;
const char *well_formed_error_pos;
const char *cannot_convert_error_pos;
@@ -8404,7 +8405,7 @@ void Field_enum::store_type(ulonglong value)
int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int err= 0;
uint32 not_used;
char buff[STRING_BUFFER_USUAL_SIZE];
@@ -8453,7 +8454,7 @@ int Field_enum::store(double nr)
int Field_enum::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
if ((ulonglong) nr > typelib->count || nr == 0)
{
@@ -8622,7 +8623,7 @@ Field *Field_enum::new_field(MEM_ROOT *root, struct st_table *new_table,
int Field_set::store(const char *from,uint length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
bool got_warning= 0;
int err= 0;
char *not_used;
@@ -8662,7 +8663,7 @@ int Field_set::store(const char *from,uint length,CHARSET_INFO *cs)
int Field_set::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int error= 0;
ulonglong max_nr= set_bits(ulonglong, typelib->count);
if ((ulonglong) nr > max_nr)
@@ -8969,7 +8970,7 @@ uint Field_bit::is_equal(Create_field *new_field)
int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int delta;
for (; length && !*from; from++, length--) // skip left 0's
@@ -9380,7 +9381,7 @@ Field_bit_as_char::Field_bit_as_char(uchar *ptr_arg, uint32 len_arg,
int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE;
+ ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
int delta;
uchar bits= (uchar) (field_length & 7);
@@ -9488,6 +9489,8 @@ void Create_field::init_for_tmp_table(enum_field_types sql_type_arg,
((decimals_arg & FIELDFLAG_MAX_DEC) << FIELDFLAG_DEC_SHIFT) |
(maybe_null ? FIELDFLAG_MAYBE_NULL : 0) |
(is_unsigned ? 0 : FIELDFLAG_DECIMAL));
+ vcol_info= 0;
+ stored_in_db= TRUE;
}
@@ -9507,6 +9510,7 @@ void Create_field::init_for_tmp_table(enum_field_types sql_type_arg,
@param fld_interval_list Interval list (if any)
@param fld_charset Field charset
@param fld_geom_type Field geometry type (if any)
+ @param fld_vcol_info Virtual column data
@retval
FALSE on success
@@ -9519,13 +9523,14 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
uint fld_type_modifier, Item *fld_default_value,
Item *fld_on_update_value, LEX_STRING *fld_comment,
char *fld_change, List<String> *fld_interval_list,
- CHARSET_INFO *fld_charset, uint fld_geom_type)
+ CHARSET_INFO *fld_charset, uint fld_geom_type,
+ Virtual_column_info *fld_vcol_info)
{
uint sign_len, allowed_type_modifier= 0;
ulong max_field_charlength= MAX_FIELD_CHARLENGTH;
DBUG_ENTER("Create_field::init()");
-
+
field= 0;
field_name= fld_name;
def= fld_default_value;
@@ -9550,6 +9555,35 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
interval_list.empty();
comment= *fld_comment;
+ stored_in_db= TRUE;
+
+ /* Initialize data for a computed field */
+ if ((uchar)fld_type == (uchar)MYSQL_TYPE_VIRTUAL)
+ {
+ vcol_info= fld_vcol_info;
+ DBUG_ASSERT(vcol_info && vcol_info->expr_item);
+ stored_in_db= vcol_info->is_stored();
+ /*
+ Walk through the Item tree checking if all items are valid
+ to be part of the virtual column
+ */
+ if (vcol_info->expr_item->walk(&Item::check_vcol_func_processor, 0, NULL))
+ {
+ my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), field_name);
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+ Make a field created for the real type.
+ Note that regular and computed fields differ from each other only by
+ Field::vcol_info. It is always NULL for a column that is not
+ computed.
+ */
+ sql_type= fld_type= vcol_info->get_real_type();
+ }
+ else
+ vcol_info= NULL;
+
/*
Set NO_DEFAULT_VALUE_FLAG if this field doesn't have a default value and
it is NOT NULL, not an AUTO_INCREMENT field and not a TIMESTAMP.
@@ -9839,7 +9873,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
}
case MYSQL_TYPE_DECIMAL:
DBUG_ASSERT(0); /* Was obsolete */
- }
+ }
/* Remember the value of length */
char_length= length;
@@ -9949,7 +9983,6 @@ uint pack_length_to_packflag(uint type)
return 0; // This shouldn't happen
}
-
Field *make_field(TABLE_SHARE *share, uchar *ptr, uint32 field_length,
uchar *null_pos, uchar null_bit,
uint pack_flag,
@@ -10141,6 +10174,8 @@ Create_field::Create_field(Field *old_field,Field *orig_field)
charset= old_field->charset(); // May be NULL ptr
comment= old_field->comment;
decimals= old_field->decimals();
+ vcol_info= old_field->vcol_info;
+ stored_in_db= old_field->stored_in_db;
/* Fix if the original table had 4 byte pointer blobs */
if (flags & BLOB_FLAG)
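
The field.cc hunk above makes Create_field::init() accept a Virtual_column_info argument and reject a computed column whose defining expression contains disallowed items by walking the item tree with Item::check_vcol_func_processor. The standalone sketch below illustrates only that walk-with-processor idea; Node, allowed_in_vcol and the tiny tree are invented for illustration and are not part of the patch or of the server sources.

#include <cstdio>
#include <vector>

// Toy expression node, a stand-in for Item.  Each node knows whether it is
// acceptable inside a virtual column definition.
struct Node
{
  bool allowed_in_vcol;
  std::vector<Node*> args;

  // Depth-first walk; returns true as soon as any node reports a problem,
  // mirroring the Item::walk() + processor convention used in the hunk above.
  bool walk(bool (Node::*processor)())
  {
    for (Node *arg : args)
      if (arg->walk(processor))
        return true;
    return (this->*processor)();
  }
  bool check_vcol_func_processor() { return !allowed_in_vcol; }
};

int main()
{
  Node ok{true, {}}, bad{false, {}};      // e.g. a column reference vs. NOW()
  Node expr{true, {&ok, &bad}};           // defining expression with one bad item
  if (expr.walk(&Node::check_vcol_func_processor))
    printf("expression is not allowed for a virtual column\n");
  return 0;
}
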
diff --git a/sql/field.h b/sql/field.h
index ce92e75ae61..5c36bf80252 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -45,6 +45,80 @@ inline uint get_set_pack_length(int elements)
return len > 4 ? 8 : len;
}
+/*
+ Virtual_column_info is the class that holds the additional
+ characteristics specific to a virtual/computed
+ field, such as:
+ - the defining expression that is evaluated to compute the value
+ of the field
+ - whether the field is to be stored in the database
+ - whether the field is used in a partitioning expression
+*/
+
+class Virtual_column_info: public Sql_alloc
+{
+private:
+ /*
+ The following data is only updated by the parser and read
+ when a Create_field object is created/initialized.
+ */
+ enum_field_types field_type; /* Real field type*/
+ /* Flag indicating that the field is physically stored in the database */
+ my_bool stored_in_db;
+ /* Flag indicating that the field is used in a partitioning expression */
+ my_bool in_partitioning_expr;
+
+public:
+ /* The expression to compute the value of the virtual column */
+ Item *expr_item;
+ /* Text representation of the defining expression */
+ LEX_STRING expr_str;
+ /*
+ The list of items created when the defining expression for the virtual
+ column is being parsed and validated. These items are freed in the closefrm
+ function when the table containing this virtual column is removed from
+ the TABLE cache.
+ TODO. Items for all different virtual columns of a table should be put into
+ one list attached to the TABLE structure.
+ */
+ Item *item_free_list;
+
+ Virtual_column_info()
+ : field_type((enum enum_field_types)MYSQL_TYPE_VIRTUAL),
+ stored_in_db(FALSE), in_partitioning_expr(FALSE),
+ expr_item(NULL), item_free_list(NULL)
+ {
+ expr_str.str= NULL;
+ expr_str.length= 0;
+ };
+ ~Virtual_column_info() {}
+ enum_field_types get_real_type()
+ {
+ return field_type;
+ }
+ void set_field_type(enum_field_types fld_type)
+ {
+ /* Calling this function can only be done once. */
+ field_type= fld_type;
+ }
+ bool is_stored()
+ {
+ return stored_in_db;
+ }
+ void set_stored_in_db_flag(bool stored)
+ {
+ stored_in_db= stored;
+ }
+ bool is_in_partitioning_expr()
+ {
+ return in_partitioning_expr;
+ }
+ void mark_as_in_partitioning_expr()
+ {
+ in_partitioning_expr= TRUE;
+ }
+};
+
class Field
{
Field(const Item &); /* Prevent use of these */
@@ -57,7 +131,7 @@ public:
uchar *ptr; // Position to field in record
uchar *null_ptr; // Byte where null_bit is
/*
- Note that you can use table->in_use as replacement for current_thd member
+ Note that you can use table->in_use as replacement for current_thd member
only inside of val_*() and store() members (e.g. you can't use it in cons)
*/
struct st_table *table; // Pointer for table
@@ -67,10 +141,10 @@ public:
/* Field is part of the following keys */
key_map key_start, part_of_key, part_of_key_not_clustered;
key_map part_of_sortkey;
- /*
- We use three additional unireg types for TIMESTAMP to overcome limitation
- of current binary format of .frm file. We'd like to be able to support
- NOW() as default and on update value for such fields but unable to hold
+ /*
+ We use three additional unireg types for TIMESTAMP to overcome limitation
+ of current binary format of .frm file. We'd like to be able to support
+ NOW() as default and on update value for such fields but unable to hold
this info anywhere except unireg_check field. This issue will be resolved
in more clean way with transition to new text based .frm format.
See also comment for Field_timestamp::Field_timestamp().
@@ -103,6 +177,19 @@ public:
*/
bool is_created_from_null_item;
+ /*
+ This is additional data provided for any computed(virtual) field.
+ In particular it includes a pointer to the item by which this field
+ can be computed from other fields.
+ */
+ Virtual_column_info *vcol_info;
+ /*
+ Flag indicating that the field is physically stored in tables
+ rather than just computed from other fields.
+ As of now, FALSE can be set only for computed virtual columns.
+ */
+ bool stored_in_db;
+
Field(uchar *ptr_arg,uint32 length_arg,uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
const char *field_name_arg);
@@ -2055,6 +2142,20 @@ public:
uint8 row,col,sc_length,interval_id; // For rea_create_table
uint offset,pack_flag;
+
+ /*
+ This is additional data provided for any computed(virtual) field.
+ In particular it includes a pointer to the item by which this field
+ can be computed from other fields.
+ */
+ Virtual_column_info *vcol_info;
+ /*
+ Flag indicating that the field is physically stored in tables
+ rather than just computed from other fields.
+ As of now, FALSE can be set only for computed virtual columns.
+ */
+ bool stored_in_db;
+
Create_field() :after(0) {}
Create_field(Field *field, Field *orig_field);
/* Used to make a clone of this object for ALTER/CREATE TABLE */
@@ -2071,7 +2172,8 @@ public:
char *decimals, uint type_modifier, Item *default_value,
Item *on_update_value, LEX_STRING *comment, char *change,
List<String> *interval_list, CHARSET_INFO *cs,
- uint uint_geom_type);
+ uint uint_geom_type,
+ Virtual_column_info *vcol_info);
bool field_flags_are_binary()
{
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 552ea27970f..8b7d64b4113 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -565,6 +565,8 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
{
if ((error= select->quick->get_next()))
break;
+ if (!error)
+ update_virtual_fields(sort_form);
file->position(sort_form->record[0]);
DBUG_EXECUTE_IF("debug_filesort", dbug_print_record(sort_form, TRUE););
}
@@ -577,11 +579,13 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select,
error= my_errno ? my_errno : -1; /* Abort */
break;
}
- error=file->rnd_pos(sort_form->record[0],next_pos);
+ error=file->ha_rnd_pos(sort_form->record[0],next_pos);
}
else
{
- error=file->rnd_next(sort_form->record[0]);
+ error=file->ha_rnd_next(sort_form->record[0]);
+ if (!error)
+ update_virtual_fields(sort_form);
if (!flag)
{
my_store_ptr(ref_pos,ref_length,record); // Position to row
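
The filesort.cc changes above call update_virtual_fields() right after every successful row fetch, so computed columns hold correct values before the sort key is built. A minimal sketch of that fetch-then-recompute loop follows; the record layout and fetch_next() are made up for illustration and are not part of the patch.

#include <cstdio>

struct record
{
  int a;            // base column, filled in by the "engine"
  int a_times_two;  // computed (virtual) column, never read from disk
};

// Recompute all virtual fields of the current row from its base columns.
static void update_virtual_fields(record &r) { r.a_times_two= r.a * 2; }

// Stand-in for ha_rnd_next(): 0 = row fetched, non-zero = end of file.
static int fetch_next(record &r)
{
  static const int rows[]= {3, 1, 2};
  static unsigned pos= 0;
  if (pos >= sizeof(rows) / sizeof(rows[0]))
    return 1;
  r.a= rows[pos++];
  return 0;
}

int main()
{
  record r;
  while (!fetch_next(r))
  {
    update_virtual_fields(r);   // must run before the row is used for sorting
    printf("a=%d a_times_two=%d\n", r.a, r.a_times_two);
  }
  return 0;
}
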
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index c8361888146..ebf8ad85410 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1638,7 +1638,7 @@ int ha_partition::copy_partitions(ulonglong * const copied,
goto error;
while (TRUE)
{
- if ((result= file->rnd_next(m_rec0)))
+ if ((result= file->ha_rnd_next(m_rec0)))
{
if (result == HA_ERR_RECORD_DELETED)
continue; //Probably MyISAM
@@ -2438,7 +2438,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
DBUG_RETURN(1);
m_start_key.length= 0;
m_rec0= table->record[0];
- m_rec_length= table_share->reclength;
+ m_rec_length= table_share->stored_rec_length;
alloc_len= m_tot_parts * (m_rec_length + PARTITION_BYTES_IN_POS);
alloc_len+= table_share->max_key_length;
if (!m_ordered_rec_buffer)
@@ -3611,7 +3611,7 @@ int ha_partition::rnd_next(uchar *buf)
while (TRUE)
{
- result= file->rnd_next(buf);
+ result= file->ha_rnd_next(buf);
if (!result)
{
m_last_part= part_id;
@@ -4461,8 +4461,8 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
}
else if (is_next_same)
{
- if (!(error= file->index_next_same(buf, m_start_key.key,
- m_start_key.length)))
+ if (!(error= file->ha_index_next_same(buf, m_start_key.key,
+ m_start_key.length)))
{
m_last_part= m_part_spec.start_part;
DBUG_RETURN(0);
@@ -4470,7 +4470,7 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
}
else
{
- if (!(error= file->index_next(buf)))
+ if (!(error= file->ha_index_next(buf)))
{
m_last_part= m_part_spec.start_part;
DBUG_RETURN(0); // Row was in range
@@ -4525,13 +4525,26 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
break;
case partition_index_read:
DBUG_PRINT("info", ("index_read on partition %d", i));
- error= file->index_read_map(buf, m_start_key.key,
- m_start_key.keypart_map,
- m_start_key.flag);
+ error= file->ha_index_read_map(buf, m_start_key.key,
+ m_start_key.keypart_map,
+ m_start_key.flag);
break;
case partition_index_first:
DBUG_PRINT("info", ("index_first on partition %d", i));
- error= file->index_first(buf);
+ /*
+ The MyISAM engine can fail if we call index_first() while indexes are
+ disabled, which happens if the table is empty.
+ Here we use file->stats.records instead of file->records() because
+ file->records() is supposed to return an EXACT count, which can be
+ slow. We don't need an exact number; an approximate one from the
+ last ::info() call is sufficient.
+ */
+ if (file->stats.records == 0)
+ {
+ error= HA_ERR_END_OF_FILE;
+ break;
+ }
+ error= file->ha_index_first(buf);
break;
case partition_index_first_unordered:
/*
@@ -4612,23 +4625,49 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
switch (m_index_scan_type) {
case partition_index_read:
- error= file->index_read_map(rec_buf_ptr,
- m_start_key.key,
- m_start_key.keypart_map,
- m_start_key.flag);
+ error= file->ha_index_read_map(rec_buf_ptr,
+ m_start_key.key,
+ m_start_key.keypart_map,
+ m_start_key.flag);
break;
case partition_index_first:
- error= file->index_first(rec_buf_ptr);
+ /*
+ The MyISAM engine can fail if we call index_first() while indexes are
+ disabled, which happens if the table is empty.
+ Here we use file->stats.records instead of file->records() because
+ file->records() is supposed to return an EXACT count, which can be
+ slow. We don't need an exact number; an approximate one from the
+ last ::info() call is sufficient.
+ */
+ if (file->stats.records == 0)
+ {
+ error= HA_ERR_END_OF_FILE;
+ break;
+ }
+ error= file->ha_index_first(rec_buf_ptr);
reverse_order= FALSE;
break;
case partition_index_last:
- error= file->index_last(rec_buf_ptr);
+ /*
+ The MyISAM engine can fail if we call index_last() while indexes are
+ disabled, which happens if the table is empty.
+ Here we use file->stats.records instead of file->records() because
+ file->records() is supposed to return an EXACT count, which can be
+ slow. We don't need an exact number; an approximate one from the
+ last ::info() call is sufficient.
+ */
+ if (file->stats.records == 0)
+ {
+ error= HA_ERR_END_OF_FILE;
+ break;
+ }
+ error= file->ha_index_last(rec_buf_ptr);
reverse_order= TRUE;
break;
case partition_index_read_last:
- error= file->index_read_last_map(rec_buf_ptr,
- m_start_key.key,
- m_start_key.keypart_map);
+ error= file->ha_index_read_last_map(rec_buf_ptr,
+ m_start_key.key,
+ m_start_key.keypart_map);
reverse_order= TRUE;
break;
case partition_read_range:
@@ -4730,10 +4769,10 @@ int ha_partition::handle_ordered_next(uchar *buf, bool is_next_same)
memcpy(rec_buf(part_id), table->record[0], m_rec_length);
}
else if (!is_next_same)
- error= file->index_next(rec_buf(part_id));
+ error= file->ha_index_next(rec_buf(part_id));
else
- error= file->index_next_same(rec_buf(part_id), m_start_key.key,
- m_start_key.length);
+ error= file->ha_index_next_same(rec_buf(part_id), m_start_key.key,
+ m_start_key.length);
if (error)
{
if (error == HA_ERR_END_OF_FILE)
@@ -4778,7 +4817,7 @@ int ha_partition::handle_ordered_prev(uchar *buf)
handler *file= m_file[part_id];
DBUG_ENTER("ha_partition::handle_ordered_prev");
- if ((error= file->index_prev(rec_buf(part_id))))
+ if ((error= file->ha_index_prev(rec_buf(part_id))))
{
if (error == HA_ERR_END_OF_FILE)
{
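
Several hunks above guard index_first()/index_last() with a check of file->stats.records, because some engines (e.g. MyISAM with disabled indexes) misbehave when asked to position an index cursor on an empty table, and an exact records() call could be slow. The standalone sketch below illustrates the guard; toy_engine, approx_records and the error codes are invented for illustration and are not the server's types.

#include <cstdio>

static const int OK= 0;
static const int END_OF_FILE= 137;   // stand-in for HA_ERR_END_OF_FILE

struct toy_engine
{
  unsigned long long approx_records; // approximate count from a previous info() call
  // Pretend positioning an index cursor on an empty table is unreliable.
  int index_first() { return approx_records ? OK : -1; }
};

// Guarded variant: skip the engine call entirely when the table looks empty.
static int first_row(toy_engine &f)
{
  if (f.approx_records == 0)
    return END_OF_FILE;
  return f.index_first();
}

int main()
{
  toy_engine empty= {0}, filled= {10};
  printf("empty: %d, filled: %d\n", first_row(empty), first_row(filled));
  return 0;
}
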
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 5ded3a0f877..7b2e72bd9db 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -250,6 +250,7 @@ public:
DBUG_RETURN(0);
}
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
+ bool check_if_supported_virtual_columns(void) { return TRUE;}
virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes);
private:
diff --git a/sql/handler.cc b/sql/handler.cc
index 28d5914a1d3..d511aa154ce 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1236,6 +1236,7 @@ int ha_commit_one_phase(THD *thd, bool all)
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
error=1;
}
+ /* Should this be done only if is_real_trans is set ? */
status_var_increment(thd->status_var.ha_commit_count);
ha_info_next= ha_info->next();
ha_info->reset(); /* keep it conveniently zero-filled */
@@ -2123,6 +2124,8 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
dup_ref=ref+ALIGN_SIZE(ref_length);
cached_table_flags= table_flags();
}
+ rows_read= rows_changed= 0;
+ memset(index_rows_read, 0, sizeof(index_rows_read));
DBUG_RETURN(error);
}
@@ -2544,9 +2547,10 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
key_copy(key, table->record[0],
table->key_info + table->s->next_number_index,
table->s->next_number_key_offset);
- error= index_read_map(table->record[1], key,
- make_prev_keypart_map(table->s->next_number_keypart),
- HA_READ_PREFIX_LAST);
+ error= ha_index_read_map(table->record[1], key,
+ make_prev_keypart_map(table->s->
+ next_number_keypart),
+ HA_READ_PREFIX_LAST);
/*
MySQL needs to call us for next row: assume we are inserting ("a",null)
here, we return 3, and next this statement will want to insert
@@ -3576,6 +3580,122 @@ void handler::get_dynamic_partition_info(PARTITION_INFO *stat_info,
}
+/*
+ Updates the global table stats with the TABLE this handler represents
+*/
+
+void handler::update_global_table_stats()
+{
+ TABLE_STATS * table_stats;
+
+ status_var_add(table->in_use->status_var.rows_read, rows_read);
+
+ if (!table->in_use->userstat_running)
+ {
+ rows_read= rows_changed= 0;
+ return;
+ }
+
+ if (rows_read + rows_changed == 0)
+ return; // Nothing to update.
+
+ DBUG_ASSERT(table->s && table->s->table_cache_key.str);
+
+ pthread_mutex_lock(&LOCK_global_table_stats);
+ /* Gets the global table stats, creating one if necessary. */
+ if (!(table_stats= (TABLE_STATS*)
+ hash_search(&global_table_stats,
+ (uchar*) table->s->table_cache_key.str,
+ table->s->table_cache_key.length)))
+ {
+ if (!(table_stats = ((TABLE_STATS*)
+ my_malloc(sizeof(TABLE_STATS),
+ MYF(MY_WME | MY_ZEROFILL)))))
+ {
+ /* Out of memory error already given */
+ goto end;
+ }
+ memcpy(table_stats->table, table->s->table_cache_key.str,
+ table->s->table_cache_key.length);
+ table_stats->table_name_length= table->s->table_cache_key.length;
+ table_stats->engine_type= ht->db_type;
+ /* No need to set variables to 0, as we use MY_ZEROFILL above */
+
+ if (my_hash_insert(&global_table_stats, (uchar*) table_stats))
+ {
+ /* Out of memory error is already given */
+ my_free(table_stats, 0);
+ goto end;
+ }
+ }
+ // Updates the global table stats.
+ table_stats->rows_read+= rows_read;
+ table_stats->rows_changed+= rows_changed;
+ table_stats->rows_changed_x_indexes+= (rows_changed *
+ (table->s->keys ? table->s->keys :
+ 1));
+ rows_read= rows_changed= 0;
+end:
+ pthread_mutex_unlock(&LOCK_global_table_stats);
+}
+
+
+/*
+ Updates the global index stats with this handler's accumulated index reads.
+*/
+
+void handler::update_global_index_stats()
+{
+ DBUG_ASSERT(table->s);
+
+ if (!table->in_use->userstat_running)
+ {
+ /* Reset all index read values */
+ bzero(index_rows_read, sizeof(index_rows_read[0]) * table->s->keys);
+ return;
+ }
+
+ for (uint index = 0; index < table->s->keys; index++)
+ {
+ if (index_rows_read[index])
+ {
+ INDEX_STATS* index_stats;
+ uint key_length;
+ KEY *key_info = &table->key_info[index]; // Rows were read using this
+
+ DBUG_ASSERT(key_info->cache_name);
+ if (!key_info->cache_name)
+ continue;
+ key_length= table->s->table_cache_key.length + key_info->name_length + 1;
+ pthread_mutex_lock(&LOCK_global_index_stats);
+ // Gets the global index stats, creating one if necessary.
+ if (!(index_stats= (INDEX_STATS*) hash_search(&global_index_stats,
+ key_info->cache_name,
+ key_length)))
+ {
+ if (!(index_stats = ((INDEX_STATS*)
+ my_malloc(sizeof(INDEX_STATS),
+ MYF(MY_WME | MY_ZEROFILL)))))
+ goto end; // Error is already given
+
+ memcpy(index_stats->index, key_info->cache_name, key_length);
+ index_stats->index_name_length= key_length;
+ if (my_hash_insert(&global_index_stats, (uchar*) index_stats))
+ {
+ my_free(index_stats, 0);
+ goto end;
+ }
+ }
+ /* Updates the global index stats. */
+ index_stats->rows_read+= index_rows_read[index];
+ index_rows_read[index]= 0;
+end:
+ pthread_mutex_unlock(&LOCK_global_index_stats);
+ }
+ }
+}
+
+
/****************************************************************************
** Some general functions that isn't in the handler class
****************************************************************************/
@@ -4234,17 +4354,16 @@ int handler::read_range_first(const key_range *start_key,
range_key_part= table->key_info[active_index].key_part;
if (!start_key) // Read first record
- result= index_first(table->record[0]);
+ result= ha_index_first(table->record[0]);
else
- result= index_read_map(table->record[0],
- start_key->key,
- start_key->keypart_map,
- start_key->flag);
+ result= ha_index_read_map(table->record[0],
+ start_key->key,
+ start_key->keypart_map,
+ start_key->flag);
if (result)
DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND)
? HA_ERR_END_OF_FILE
: result);
-
DBUG_RETURN (compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE);
}
@@ -4270,11 +4389,11 @@ int handler::read_range_next()
if (eq_range)
{
/* We trust that index_next_same always gives a row in range */
- DBUG_RETURN(index_next_same(table->record[0],
- end_range->key,
- end_range->length));
+ DBUG_RETURN(ha_index_next_same(table->record[0],
+ end_range->key,
+ end_range->length));
}
- result= index_next(table->record[0]);
+ result= ha_index_next(table->record[0]);
if (result)
DBUG_RETURN(result);
DBUG_RETURN(compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE);
@@ -4656,6 +4775,7 @@ int handler::ha_write_row(uchar *buf)
if (unlikely(error= write_row(buf)))
DBUG_RETURN(error);
+ rows_changed++;
if (unlikely(error= binlog_log_row(table, 0, buf, log_func)))
DBUG_RETURN(error); /* purecov: inspected */
DBUG_RETURN(0);
@@ -4677,6 +4797,7 @@ int handler::ha_update_row(const uchar *old_data, uchar *new_data)
if (unlikely(error= update_row(old_data, new_data)))
return error;
+ rows_changed++;
if (unlikely(error= binlog_log_row(table, old_data, new_data, log_func)))
return error;
return 0;
@@ -4691,6 +4812,7 @@ int handler::ha_delete_row(const uchar *buf)
if (unlikely(error= delete_row(buf)))
return error;
+ rows_changed++;
if (unlikely(error= binlog_log_row(table, buf, 0, log_func)))
return error;
return 0;
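
update_global_table_stats() above flushes each handler's rows_read/rows_changed counters into a server-wide hash keyed by the table cache key, under LOCK_global_table_stats, and then resets the per-handler counters. The sketch below shows the same flush-under-a-lock scheme using standard containers; it is only an illustration, not the server's hash or locking code.

#include <cstdio>
#include <map>
#include <mutex>
#include <string>

struct table_stats
{
  unsigned long long rows_read;
  unsigned long long rows_changed;
};

static std::map<std::string, table_stats> global_table_stats;
static std::mutex LOCK_global_table_stats;

// Flush a handler's counters into the global map and reset them.
static void flush_table_stats(const std::string &table_key,
                              unsigned long long &rows_read,
                              unsigned long long &rows_changed)
{
  if (rows_read + rows_changed == 0)
    return;                                         // nothing to update
  std::lock_guard<std::mutex> guard(LOCK_global_table_stats);
  table_stats &s= global_table_stats[table_key];    // created zeroed if missing
  s.rows_read+= rows_read;
  s.rows_changed+= rows_changed;
  rows_read= rows_changed= 0;                       // handler starts counting again
}

int main()
{
  unsigned long long reads= 5, changes= 2;
  flush_table_stats("./test/t1", reads, changes);
  const table_stats &s= global_table_stats["./test/t1"];
  printf("rows_read=%llu rows_changed=%llu\n", s.rows_read, s.rows_changed);
  return 0;
}
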
diff --git a/sql/handler.h b/sql/handler.h
index 0e4033512d1..61ff5fca565 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -30,6 +30,10 @@
#define USING_TRANSACTIONS
+#if MAX_KEY > 128
+#error MAX_KEY is too large. Values up to 128 are supported.
+#endif
+
// the following is for checking tables
#define HA_ADMIN_ALREADY_DONE 1
@@ -600,8 +604,9 @@ struct handlerton
SHOW_COMP_OPTION state;
/*
- Historical number used for frm file to determine the correct storage engine.
- This is going away and new engines will just use "name" for this.
+ Historical number used for frm file to determine the correct
+ storage engine. This is going away and new engines will just use
+ "name" for this.
*/
enum legacy_db_type db_type;
/*
@@ -1146,6 +1151,12 @@ public:
Interval returned by get_auto_increment() and being consumed by the
inserter.
*/
+ /* Statistics variables */
+ ulonglong rows_read;
+ ulonglong rows_changed;
+ /* One bigger than needed to avoid having to test if key == MAX_KEY */
+ ulonglong index_rows_read[MAX_KEY+1];
+
Discrete_interval auto_inc_interval_for_cur_row;
/**
Number of reserved auto-increment intervals. Serves as a heuristic
@@ -1164,7 +1175,10 @@ public:
locked(FALSE), implicit_emptied(0),
pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0),
auto_inc_intervals_count(0)
- {}
+ {
+ reset_statistics();
+ }
+
virtual ~handler(void)
{
DBUG_ASSERT(locked == FALSE);
@@ -1286,10 +1300,16 @@ public:
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
uint get_dup_key(int error);
+ void reset_statistics()
+ {
+ rows_read= rows_changed= 0;
+ bzero(index_rows_read, sizeof(index_rows_read));
+ }
virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
table= table_arg;
table_share= share;
+ reset_statistics();
}
virtual double scan_time()
{ return ulonglong2double(stats.data_file_length) / IO_SIZE + 2; }
@@ -1398,22 +1418,23 @@ public:
}
/**
@brief
- Positions an index cursor to the index specified in the handle. Fetches the
- row if available. If the key value is null, begin at the first key of the
- index.
+ Positions an index cursor to the index specified in the
+ handle. Fetches the row if available. If the key value is null,
+ begin at the first key of the index.
*/
+protected:
virtual int index_read_map(uchar * buf, const uchar * key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
{
uint key_len= calculate_key_len(table, active_index, key, keypart_map);
- return index_read(buf, key, key_len, find_flag);
+ return index_read(buf, key, key_len, find_flag);
}
/**
@brief
- Positions an index cursor to the index specified in the handle. Fetches the
- row if available. If the key value is null, begin at the first key of the
- index.
+ Positions an index cursor to the index specified in the
+ handle. Fetches the row if available. If the key value is null,
+ begin at the first key of the index.
*/
virtual int index_read_idx_map(uchar * buf, uint index, const uchar * key,
key_part_map keypart_map,
@@ -1438,6 +1459,79 @@ public:
uint key_len= calculate_key_len(table, active_index, key, keypart_map);
return index_read_last(buf, key, key_len);
}
+ inline void update_index_statistics()
+ {
+ index_rows_read[active_index]++;
+ rows_read++;
+ }
+public:
+
+ /* Similar functions to the above, but with statistics counting */
+ inline int ha_index_read_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
+ {
+ int error= index_read_map(buf, key, keypart_map, find_flag);
+ if (!error)
+ update_index_statistics();
+ return error;
+ }
+ inline int ha_index_read_idx_map(uchar * buf, uint index, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
+ {
+ int error= index_read_idx_map(buf, index, key, keypart_map, find_flag);
+ if (!error)
+ {
+ rows_read++;
+ index_rows_read[index]++;
+ }
+ return error;
+ }
+ inline int ha_index_next(uchar * buf)
+ {
+ int error= index_next(buf);
+ if (!error)
+ update_index_statistics();
+ return error;
+ }
+ inline int ha_index_prev(uchar * buf)
+ {
+ int error= index_prev(buf);
+ if (!error)
+ update_index_statistics();
+ return error;
+ }
+ inline int ha_index_first(uchar * buf)
+ {
+ int error= index_first(buf);
+ if (!error)
+ update_index_statistics();
+ return error;
+ }
+ inline int ha_index_last(uchar * buf)
+ {
+ int error= index_last(buf);
+ if (!error)
+ update_index_statistics();
+ return error;
+ }
+ inline int ha_index_next_same(uchar *buf, const uchar *key, uint keylen)
+ {
+ int error= index_next_same(buf, key, keylen);
+ if (!error)
+ update_index_statistics();
+ return error;
+ }
+ inline int ha_index_read_last_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map)
+ {
+ int error= index_read_last_map(buf, key, keypart_map);
+ if (!error)
+ update_index_statistics();
+ return error;
+ }
+
virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
KEY_MULTI_RANGE *ranges, uint range_count,
bool sorted, HANDLER_BUFFER *buffer);
@@ -1451,6 +1545,7 @@ public:
void ft_end() { ft_handler=NULL; }
virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
{ return NULL; }
+private:
virtual int ft_read(uchar *buf) { return HA_ERR_WRONG_COMMAND; }
virtual int rnd_next(uchar *buf)=0;
virtual int rnd_pos(uchar * buf, uchar *pos)=0;
@@ -1461,11 +1556,50 @@ public:
handlers for random position.
*/
virtual int rnd_pos_by_record(uchar *record)
- {
- position(record);
- return rnd_pos(record, ref);
- }
+ {
+ position(record);
+ return rnd_pos(record, ref);
+ }
virtual int read_first_row(uchar *buf, uint primary_key);
+public:
+
+ /* Same as above, but with statistics */
+ inline int ha_ft_read(uchar *buf)
+ {
+ int error= ft_read(buf);
+ if (!error)
+ rows_read++;
+ return error;
+ }
+ inline int ha_rnd_next(uchar *buf)
+ {
+ int error= rnd_next(buf);
+ if (!error)
+ rows_read++;
+ return error;
+ }
+ inline int ha_rnd_pos(uchar *buf, uchar *pos)
+ {
+ int error= rnd_pos(buf, pos);
+ if (!error)
+ rows_read++;
+ return error;
+ }
+ inline int ha_rnd_pos_by_record(uchar *buf)
+ {
+ int error= rnd_pos_by_record(buf);
+ if (!error)
+ rows_read++;
+ return error;
+ }
+ inline int ha_read_first_row(uchar *buf, uint primary_key)
+ {
+ int error= read_first_row(buf, primary_key);
+ if (!error)
+ rows_read++;
+ return error;
+ }
+
/**
The following 3 function is only needed for tables that may be
internal temporary tables during joins.
@@ -1634,6 +1768,9 @@ public:
virtual bool is_crashed() const { return 0; }
virtual bool auto_repair() const { return 0; }
+ void update_global_table_stats();
+ void update_global_index_stats();
+
#define CHF_CREATE_FLAG 0
#define CHF_DELETE_FLAG 1
#define CHF_RENAME_FLAG 2
@@ -1768,6 +1905,17 @@ public:
LEX_STRING *engine_name() { return hton_name(ht); }
+ /*
+ @brief
+ Check whether the engine supports virtual columns
+
+ @retval
+ FALSE if the engine does not support virtual columns
+ @retval
+ TRUE if the engine supports virtual columns
+ */
+ virtual bool check_if_supported_virtual_columns(void) { return FALSE;}
+
protected:
/* Service methods for use by storage engines. */
void ha_statistic_increment(ulong SSV::*offset) const;
@@ -1952,6 +2100,7 @@ private:
{ return HA_ERR_WRONG_COMMAND; }
virtual int rename_partitions(const char *path)
{ return HA_ERR_WRONG_COMMAND; }
+ friend class ha_partition;
};
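
The handler.h changes above make the raw index_*/rnd_*/ft_read methods protected and route all callers through inline ha_*() wrappers that bump rows_read and index_rows_read[] only on success, so every storage engine is measured without being modified. A stripped-down sketch of that wrapper pattern follows; toy_handler and its members are invented for illustration and are not the server's handler class.

#include <cstdio>

class toy_handler
{
protected:
  // Raw engine call, now hidden from callers; 0 = success.
  virtual int rnd_next(int *buf) { *buf= 42; return 0; }

public:
  unsigned long long rows_read;

  toy_handler() : rows_read(0) {}
  virtual ~toy_handler() {}

  // Public wrapper: the single place where read statistics are updated.
  int ha_rnd_next(int *buf)
  {
    int error= rnd_next(buf);
    if (!error)
      rows_read++;
    return error;
  }
};

int main()
{
  toy_handler h;
  int row;
  for (int i= 0; i < 3; i++)
    h.ha_rnd_next(&row);
  printf("rows_read=%llu\n", h.rows_read);   // prints 3
  return 0;
}
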
diff --git a/sql/item.cc b/sql/item.cc
index 5160a083cfe..36918a0804e 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -681,9 +681,24 @@ bool Item_field::register_field_in_read_map(uchar *arg)
TABLE *table= (TABLE *) arg;
if (field->table == table || !table)
bitmap_set_bit(field->table->read_set, field->field_index);
+ if (field->vcol_info && field->vcol_info->expr_item)
+ return field->vcol_info->expr_item->walk(&Item::register_field_in_read_map,
+ 1, arg);
return 0;
}
+/*
+ @brief
+ Mark field in bitmap supplied as *arg
+*/
+
+bool Item_field::register_field_in_bitmap(uchar *arg)
+{
+ MY_BITMAP *bitmap= (MY_BITMAP *) arg;
+ DBUG_ASSERT(bitmap);
+ bitmap_set_bit(bitmap, field->field_index);
+ return 0;
+}
bool Item::check_cols(uint c)
{
@@ -4450,6 +4465,21 @@ error:
return TRUE;
}
+/*
+ @brief
+ Mark virtual columns as used in a partitioning expression
+*/
+
+bool Item_field::vcol_in_partition_func_processor(uchar *int_arg)
+{
+ DBUG_ASSERT(fixed);
+ if (field->vcol_info)
+ {
+ field->vcol_info->mark_as_in_partitioning_expr();
+ }
+ return FALSE;
+}
+
Item *Item_field::safe_charset_converter(CHARSET_INFO *tocs)
{
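
The item.cc hunk above extends Item_field::register_field_in_read_map() so that marking a virtual field as read also walks its defining expression and marks every base field it depends on. The sketch below shows that recursion with made-up types (field_ref, expr); it is illustrative only and not part of the patch.

#include <cstdio>
#include <set>
#include <vector>

struct field_ref;

struct expr
{
  std::vector<field_ref*> fields;   // fields referenced by the expression
};

struct field_ref
{
  int field_index;
  expr *vcol_expr;                  // non-null only for computed fields
};

// Mark a field as read; for a computed field, also mark its dependencies.
static void mark_read(field_ref *f, std::set<int> &read_set)
{
  read_set.insert(f->field_index);
  if (f->vcol_expr)
    for (field_ref *dep : f->vcol_expr->fields)
      mark_read(dep, read_set);
}

int main()
{
  field_ref a= {0, NULL}, b= {1, NULL};
  expr e;
  e.fields.push_back(&a);
  e.fields.push_back(&b);
  field_ref v= {2, &e};             // v is computed from a and b
  std::set<int> read_set;
  mark_read(&v, read_set);
  printf("%d fields marked as read\n", (int) read_set.size());
  return 0;
}
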
diff --git a/sql/item.h b/sql/item.h
index 91191516fcd..aeec1e92b4e 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -18,6 +18,24 @@
#pragma interface /* gcc class implementation */
#endif
+inline
+bool trace_unsupported_func(const char *where, const char *processor_name)
+{
+ char buff[64];
+ sprintf(buff, "%s::%s", where, processor_name);
+ DBUG_ENTER(buff);
+ sprintf(buff, "%s returns TRUE: unsupported function", processor_name);
+ DBUG_PRINT("info", (buff));
+ DBUG_RETURN(TRUE);
+}
+
+inline
+bool trace_unsupported_by_check_vcol_func_processor(const char *where)
+{
+ return trace_unsupported_func(where, "check_vcol_func_processor");
+}
+
+
class Protocol;
struct TABLE_LIST;
void item_init(void); /* Init item functions */
@@ -902,6 +920,11 @@ public:
virtual bool enumerate_field_refs_processor(uchar *arg) { return 0; }
virtual bool mark_as_eliminated_processor(uchar *arg) { return 0; }
/*
+ The next function differs from the previous one in that the bitmap to be
+ updated is passed as uchar *arg.
+ */
+ virtual bool register_field_in_bitmap(uchar *arg) { return 0; }
+ /*
Check if a partition function is allowed
SYNOPSIS
check_partition_func_processor()
@@ -953,11 +976,43 @@ public:
fields.
*/
virtual bool check_partition_func_processor(uchar *bool_arg) { return TRUE;}
+ /*
+ @brief
+ Processor used to mark virtual columns used in partitioning expression
+
+ @param
+ arg always ignored
+
+ @retval
+ FALSE always
+ */
+ virtual bool vcol_in_partition_func_processor(uchar *arg)
+ {
+ return FALSE;
+ }
+
virtual bool subst_argument_checker(uchar **arg)
- {
+ {
if (*arg)
- *arg= NULL;
- return TRUE;
+ *arg= NULL;
+ return TRUE;
+ }
+ /*
+ @brief
+ Processor used to check acceptability of an item in the defining
+ expression for a virtual column
+
+ @param
+ arg always ignored
+
+ @retval
+ FALSE the item is accepted in the definition of a virtual column
+ @retval
+ TRUE otherwise
+ */
+ virtual bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(full_name());
}
virtual Item *equal_fields_propagator(uchar * arg) { return this; }
@@ -1334,6 +1389,10 @@ public:
{
return value_item->send(protocol, str);
}
+ bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("name_const");
+ }
};
bool agg_item_collations(DTCollation &c, const char *name,
@@ -1353,6 +1412,7 @@ public:
virtual Item_num *neg()= 0;
Item *safe_charset_converter(CHARSET_INFO *tocs);
bool check_partition_func_processor(uchar *int_arg) { return FALSE;}
+ bool check_vcol_func_processor(uchar *arg) { return FALSE;}
};
#define NO_CACHED_FIELD_INDEX ((uint)(-1))
@@ -1512,7 +1572,10 @@ public:
bool collect_item_field_processor(uchar * arg);
bool find_item_in_field_list_processor(uchar *arg);
bool register_field_in_read_map(uchar *arg);
+ bool register_field_in_bitmap(uchar *arg);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool vcol_in_partition_func_processor(uchar *bool_arg);
+ bool check_vcol_func_processor(uchar *arg) { return FALSE;}
bool enumerate_field_refs_processor(uchar *arg);
void cleanup();
bool result_as_longlong()
@@ -1573,6 +1636,7 @@ public:
Item *safe_charset_converter(CHARSET_INFO *tocs);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *arg) { return FALSE;}
};
class Item_null_result :public Item_null
@@ -1586,7 +1650,11 @@ public:
save_in_field(result_field, no_conversions);
}
bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
-};
+ bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(full_name());
+ }
+};
/* Item represents one placeholder ('?') of prepared statement */
@@ -1727,6 +1795,7 @@ public:
/** Item is a argument to a limit clause. */
bool limit_clause_param;
void set_param_type_and_swap_value(Item_param *from);
+
};
@@ -1762,6 +1831,7 @@ public:
{ return (uint)(max_length - test(value < 0)); }
bool eq(const Item *, bool binary_cmp) const;
bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
+  bool check_vcol_func_processor(uchar *arg) { return FALSE;}
};
@@ -1780,6 +1850,7 @@ public:
Item_num *neg ();
uint decimal_precision() const { return max_length; }
bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
+ bool check_vcol_func_processor(uchar *arg) { return FALSE;}
};
@@ -1821,6 +1892,7 @@ public:
bool eq(const Item *, bool binary_cmp) const;
void set_decimal_value(my_decimal *value_par);
bool check_partition_func_processor(uchar *bool_arg) { return FALSE;}
+ bool check_vcol_func_processor(uchar *arg) { return FALSE;}
};
@@ -1978,6 +2050,7 @@ public:
}
virtual void print(String *str, enum_query_type query_type);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *arg) { return FALSE;}
/**
Return TRUE if character-set-introducer was explicitly specified in the
@@ -2031,7 +2104,7 @@ double_from_string_with_check (CHARSET_INFO *cs, const char *cptr, char *end);
class Item_static_string_func :public Item_string
{
const char *func_name;
-public:
+ public:
Item_static_string_func(const char *name_par, const char *str, uint length,
CHARSET_INFO *cs,
Derivation dv= DERIVATION_COERCIBLE)
@@ -2045,6 +2118,10 @@ public:
}
bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
+ bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name);
+ }
};
@@ -2056,6 +2133,10 @@ public:
CHARSET_INFO *cs= NULL):
Item_string(name_arg, length, cs)
{}
+ bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("safe_string");
+ }
};
@@ -2135,6 +2216,7 @@ public:
bool eq(const Item *item, bool binary_cmp) const;
virtual Item *safe_charset_converter(CHARSET_INFO *tocs);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *arg) { return FALSE;}
};
@@ -2165,6 +2247,7 @@ public:
save_in_field(result_field, no_conversions);
}
void cleanup();
+ bool check_vcol_func_processor(uchar *arg) { return FALSE;}
};
@@ -2294,7 +2377,10 @@ public:
if (ref && result_type() == ROW_RESULT)
(*ref)->bring_value();
}
-
+ bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("ref");
+ }
};
@@ -2570,6 +2656,10 @@ public:
table_map used_tables() const { return (table_map) 1L; }
bool const_item() const { return 0; }
bool is_null() { return null_value; }
+ bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("copy");
+ }
/*
Override the methods below as pure virtual to make sure all the
@@ -2812,6 +2902,10 @@ public:
return arg->walk(processor, walk_subquery, args) ||
(this->*processor)(args);
}
+ bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("values");
+ }
};
@@ -2908,6 +3002,10 @@ private:
BEFORE INSERT of BEFORE UPDATE trigger.
*/
bool read_only;
+ virtual bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("trigger");
+ }
};
@@ -2967,6 +3065,11 @@ public:
{
return this == item;
}
+ bool check_vcol_func_processor(uchar *arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("cache");
+ }
+
};
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 45d874b9cd9..7dbf07ab86f 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -3936,10 +3936,30 @@ bool Item_func_set_user_var::register_field_in_read_map(uchar *arg)
TABLE *table= (TABLE *) arg;
if (result_field->table == table || !table)
bitmap_set_bit(result_field->table->read_set, result_field->field_index);
+ if (result_field->vcol_info)
+ return result_field->vcol_info->
+ expr_item->walk(&Item::register_field_in_read_map, 1, arg);
}
return 0;
}
+/*
+  Mark the result field of this item in the bitmap supplied as *arg
+*/
+
+bool Item_func_set_user_var::register_field_in_bitmap(uchar *arg)
+{
+ MY_BITMAP *bitmap = (MY_BITMAP *) arg;
+ DBUG_ASSERT(bitmap);
+ if (result_field)
+ {
+ if (!bitmap)
+ return 1;
+ bitmap_set_bit(bitmap, result_field->field_index);
+ }
+ return 0;
+}
/**
Set value to user variable.
diff --git a/sql/item_func.h b/sql/item_func.h
index eee3a7ef0f6..8231963dfd8 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -339,6 +339,7 @@ public:
void fix_length_and_dec();
bool fix_fields(THD *thd, Item **ref);
longlong val_int() { DBUG_ASSERT(fixed == 1); return value; }
+ bool check_vcol_func_processor(uchar *int_arg) { return TRUE;}
};
@@ -399,6 +400,7 @@ public:
Item_func_additive_op(Item *a,Item *b) :Item_num_op(a,b) {}
void result_precision();
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -434,6 +436,7 @@ public:
my_decimal *decimal_op(my_decimal *);
void result_precision();
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -466,6 +469,7 @@ public:
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -480,6 +484,7 @@ public:
void result_precision();
void fix_length_and_dec();
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -496,6 +501,7 @@ public:
void fix_num_length_and_dec();
uint decimal_precision() const { return args[0]->decimal_precision(); }
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -509,6 +515,7 @@ public:
const char *func_name() const { return "abs"; }
void fix_length_and_dec();
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
// A class to handle logarithmic and trigonometric functions
@@ -664,6 +671,7 @@ public:
double real_op();
my_decimal *decimal_op(my_decimal *);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -676,6 +684,7 @@ public:
double real_op();
my_decimal *decimal_op(my_decimal *);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
/* This handles round and truncate */
@@ -707,6 +716,10 @@ public:
void update_used_tables();
bool fix_fields(THD *thd, Item **ref);
void cleanup() { first_eval= TRUE; Item_real_func::cleanup(); }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
private:
void seed_random (Item * val);
};
@@ -989,6 +1002,10 @@ public:
max_length= args[0]->max_length;
}
bool fix_fields(THD *thd, Item **ref);
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -1002,6 +1019,10 @@ public:
const char *func_name() const { return "benchmark"; }
void fix_length_and_dec() { max_length=1; maybe_null=0; }
virtual void print(String *str, enum_query_type query_type);
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -1017,6 +1038,10 @@ public:
used_tables_cache|= RAND_TABLE_BIT;
}
longlong val_int();
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -1266,6 +1291,10 @@ class Item_func_get_lock :public Item_int_func
longlong val_int();
const char *func_name() const { return "get_lock"; }
void fix_length_and_dec() { max_length=1; maybe_null=1;}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
class Item_func_release_lock :public Item_int_func
@@ -1276,6 +1305,10 @@ public:
longlong val_int();
const char *func_name() const { return "release_lock"; }
void fix_length_and_dec() { max_length=1; maybe_null=1;}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
/* replication functions */
@@ -1289,6 +1322,10 @@ public:
longlong val_int();
const char *func_name() const { return "master_pos_wait"; }
void fix_length_and_dec() { max_length=21; maybe_null=1;}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -1360,6 +1397,7 @@ public:
}
void save_org_in_field(Field *field) { (void)save_in_field(field, 1, 0); }
bool register_field_in_read_map(uchar *arg);
+ bool register_field_in_bitmap(uchar *arg);
bool set_entry(THD *thd, bool create_if_not_exists);
void cleanup();
};
@@ -1534,6 +1572,11 @@ public:
bool fix_index();
void init_search(bool no_order);
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+    /* TODO: consider adding support for MATCH-based virtual columns */
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -1553,6 +1596,17 @@ public:
longlong val_int();
const char *func_name() const { return "is_free_lock"; }
void fix_length_and_dec() { decimals=0; max_length=1; maybe_null=1;}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+    return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
class Item_func_is_used_lock :public Item_int_func
@@ -1563,6 +1617,10 @@ public:
longlong val_int();
const char *func_name() const { return "is_used_lock"; }
void fix_length_and_dec() { decimals=0; max_length=10; maybe_null=1;}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
/* For type casts */
@@ -1582,6 +1640,11 @@ public:
longlong val_int();
const char *func_name() const { return "row_count"; }
void fix_length_and_dec() { decimals= 0; maybe_null=0; }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -1690,6 +1753,10 @@ public:
{
return sp_result_field;
}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -1700,6 +1767,10 @@ public:
longlong val_int();
const char *func_name() const { return "found_rows"; }
void fix_length_and_dec() { decimals= 0; maybe_null=0; }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -1714,5 +1785,9 @@ public:
void fix_length_and_dec()
{ max_length= 21; unsigned_flag=1; }
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
diff --git a/sql/item_row.h b/sql/item_row.h
index 67441f49603..71679100448 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -76,4 +76,5 @@ public:
bool check_cols(uint c);
bool null_inside() { return with_null; };
void bring_value();
-};
+ bool check_vcol_func_processor(uchar *int_arg) {return FALSE; }
+ };
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 8625add8d2d..6b424989c58 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -344,6 +344,10 @@ public:
String *val_str(String *);
void fix_length_and_dec() { maybe_null=1; max_length = 13; }
const char *func_name() const { return "encrypt"; }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
#include "sql_crypt.h"
@@ -381,6 +385,11 @@ public:
call
*/
virtual const char *fully_qualified_func_name() const = 0;
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(
+ fully_qualified_func_name());
+ }
};
@@ -644,6 +653,10 @@ public:
maybe_null=1;
max_length=MAX_BLOB_WIDTH;
}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -840,5 +853,9 @@ public:
}
const char *func_name() const{ return "uuid"; }
String *val_str(String *);
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index e3cf96d29ca..cc5021612bf 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -2049,7 +2049,7 @@ int subselect_uniquesubquery_engine::scan_table()
table->null_row= 0;
for (;;)
{
- error=table->file->rnd_next(table->record[0]);
+ error=table->file->ha_rnd_next(table->record[0]);
if (error && error != HA_ERR_END_OF_FILE)
{
error= report_error(table, error);
@@ -2223,10 +2223,11 @@ int subselect_uniquesubquery_engine::exec()
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key, 0);
- error= table->file->index_read_map(table->record[0],
- tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts),
- HA_READ_KEY_EXACT);
+    error= table->file->
+      ha_index_read_map(table->record[0], tab->ref.key_buff,
+                        make_prev_keypart_map(tab->ref.key_parts),
+                        HA_READ_KEY_EXACT);
if (error &&
error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
error= report_error(table, error);
@@ -2344,10 +2345,11 @@ int subselect_indexsubquery_engine::exec()
if (!table->file->inited)
table->file->ha_index_init(tab->ref.key, 1);
- error= table->file->index_read_map(table->record[0],
- tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts),
- HA_READ_KEY_EXACT);
+    error= table->file->
+      ha_index_read_map(table->record[0], tab->ref.key_buff,
+                        make_prev_keypart_map(tab->ref.key_parts),
+                        HA_READ_KEY_EXACT);
if (error &&
error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
error= report_error(table, error);
@@ -2368,9 +2370,9 @@ int subselect_indexsubquery_engine::exec()
((Item_in_subselect *) item)->value= 1;
break;
}
- error= table->file->index_next_same(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length);
+ error= table->file->ha_index_next_same(table->record[0],
+ tab->ref.key_buff,
+ tab->ref.key_length);
if (error && error != HA_ERR_END_OF_FILE)
{
error= report_error(table, error);
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 19d58c65259..63a9acb95f0 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -136,6 +136,10 @@ public:
bool walk(Item_processor processor, bool walk_subquery, uchar *arg);
bool mark_as_eliminated_processor(uchar *arg);
bool enumerate_field_refs_processor(uchar *arg);
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("subselect");
+ }
/**
Get the SELECT_LEX structure associated with this Item.
diff --git a/sql/item_sum.h b/sql/item_sum.h
index e884452d6e6..d0723fa0ab0 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -399,6 +399,10 @@ public:
Item *get_arg(int i) { return args[i]; }
Item *set_arg(int i, THD *thd, Item *new_val);
uint get_arg_count() { return arg_count; }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -679,6 +683,10 @@ public:
}
void fix_length_and_dec() {}
enum Item_result result_type () const { return hybrid_type; }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("avg_field");
+ }
};
@@ -747,6 +755,10 @@ public:
}
void fix_length_and_dec() {}
enum Item_result result_type () const { return hybrid_type; }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("var_field");
+ }
};
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 9e3c2e8c89f..26e47e43d05 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -70,6 +70,7 @@ public:
enum_monotonicity_info get_monotonicity_info() const;
longlong val_int_endpoint(bool left_endp, bool *incl_endp);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -86,6 +87,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -111,6 +113,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -124,6 +127,7 @@ public:
enum Item_result result_type () const { return STRING_RESULT; }
void fix_length_and_dec();
bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
+ bool check_vcol_func_processor(uchar *int_arg) {return FALSE;}
};
@@ -140,6 +144,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -156,6 +161,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -172,6 +178,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -188,6 +195,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -204,6 +212,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -234,6 +243,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -252,6 +262,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -282,6 +293,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
class Item_func_dayname :public Item_func_weekday
@@ -294,6 +306,7 @@ class Item_func_dayname :public Item_func_weekday
enum Item_result result_type () const { return STRING_RESULT; }
void fix_length_and_dec();
bool check_partition_func_processor(uchar *int_arg) {return TRUE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -310,6 +323,14 @@ public:
decimals=0;
max_length=10*MY_CHARSET_BIN_MB_MAXLEN;
}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ /*
+ TODO: Allow UNIX_TIMESTAMP called with an argument to be a part
+ of the expression for a virtual column
+ */
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -325,6 +346,7 @@ public:
max_length=10*MY_CHARSET_BIN_MB_MAXLEN;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -445,6 +467,10 @@ public:
*/
virtual void store_now_in_TIME(MYSQL_TIME *now_time)=0;
bool result_as_longlong() { return TRUE; }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -481,6 +507,10 @@ public:
void fix_length_and_dec();
bool get_date(MYSQL_TIME *res, uint fuzzy_date);
virtual void store_now_in_TIME(MYSQL_TIME *now_time)=0;
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -521,6 +551,10 @@ public:
void fix_length_and_dec();
bool get_date(MYSQL_TIME *res, uint fuzzy_date);
virtual void store_now_in_TIME(MYSQL_TIME *now_time)=0;
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
@@ -578,6 +612,7 @@ public:
const char *func_name() const { return "from_days"; }
bool get_date(MYSQL_TIME *res, uint fuzzy_date);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -705,6 +740,7 @@ class Item_extract :public Item_int_func
bool eq(const Item *item, bool binary_cmp) const;
virtual void print(String *str, enum_query_type query_type);
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
@@ -953,6 +989,7 @@ public:
maybe_null=1;
}
bool check_partition_func_processor(uchar *int_arg) {return FALSE;}
+ bool check_vcol_func_processor(uchar *int_arg) { return FALSE;}
};
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 100192cd091..15165a9c435 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -220,6 +220,11 @@ public:
collation.collation= pxml->charset();
}
const char *func_name() const { return "nodeset"; }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
+
};
@@ -526,6 +531,10 @@ public:
enum Type type() const { return XPATH_NODESET_CMP; };
const char *func_name() const { return "xpath_nodeset_to_const_comparator"; }
bool is_bool_func() { return 1; }
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
longlong val_int()
{
diff --git a/sql/item_xmlfunc.h b/sql/item_xmlfunc.h
index dadbb5ccf42..ce33d161c79 100644
--- a/sql/item_xmlfunc.h
+++ b/sql/item_xmlfunc.h
@@ -40,6 +40,10 @@ public:
}
void fix_length_and_dec();
String *parse_xml(String *raw_xml, String *parsed_xml_buf);
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor(func_name());
+ }
};
diff --git a/sql/lex.h b/sql/lex.h
index d9c382347c9..47b563c2c85 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -62,6 +62,7 @@ static SYMBOL symbols[] = {
{ "ALL", SYM(ALL)},
{ "ALGORITHM", SYM(ALGORITHM_SYM)},
{ "ALTER", SYM(ALTER)},
+ { "ALWAYS", SYM(ALWAYS_SYM)},
{ "ANALYZE", SYM(ANALYZE_SYM)},
{ "AND", SYM(AND_SYM)},
{ "ANY", SYM(ANY_SYM)},
@@ -106,6 +107,7 @@ static SYMBOL symbols[] = {
{ "CHECKSUM", SYM(CHECKSUM_SYM)},
{ "CIPHER", SYM(CIPHER_SYM)},
{ "CLIENT", SYM(CLIENT_SYM)},
+ { "CLIENT_STATISTICS", SYM(CLIENT_STATS_SYM)},
{ "CLOSE", SYM(CLOSE_SYM)},
{ "COALESCE", SYM(COALESCE)},
{ "CODE", SYM(CODE_SYM)},
@@ -220,6 +222,7 @@ static SYMBOL symbols[] = {
{ "FULL", SYM(FULL)},
{ "FULLTEXT", SYM(FULLTEXT_SYM)},
{ "FUNCTION", SYM(FUNCTION_SYM)},
+ { "GENERATED", SYM(GENERATED_SYM)},
{ "GEOMETRY", SYM(GEOMETRY_SYM)},
{ "GEOMETRYCOLLECTION",SYM(GEOMETRYCOLLECTION)},
{ "GET_FORMAT", SYM(GET_FORMAT)},
@@ -245,6 +248,7 @@ static SYMBOL symbols[] = {
{ "IN", SYM(IN_SYM)},
{ "INDEX", SYM(INDEX_SYM)},
{ "INDEXES", SYM(INDEXES)},
+ { "INDEX_STATISTICS", SYM(INDEX_STATS_SYM)},
{ "INFILE", SYM(INFILE)},
{ "INITIAL_SIZE", SYM(INITIAL_SIZE_SYM)},
{ "INNER", SYM(INNER_SYM)},
@@ -385,14 +389,16 @@ static SYMBOL symbols[] = {
{ "OUTFILE", SYM(OUTFILE)},
{ "OWNER", SYM(OWNER_SYM)},
{ "PACK_KEYS", SYM(PACK_KEYS_SYM)},
- { "PARSER", SYM(PARSER_SYM)},
{ "PAGE", SYM(PAGE_SYM)},
{ "PAGE_CHECKSUM", SYM(PAGE_CHECKSUM_SYM)},
+ { "PARSER", SYM(PARSER_SYM)},
+ { "PARSE_VCOL_EXPR", SYM(PARSE_VCOL_EXPR_SYM)},
{ "PARTIAL", SYM(PARTIAL)},
{ "PARTITION", SYM(PARTITION_SYM)},
{ "PARTITIONING", SYM(PARTITIONING_SYM)},
{ "PARTITIONS", SYM(PARTITIONS_SYM)},
{ "PASSWORD", SYM(PASSWORD)},
+ { "PERSISTENT", SYM(PERSISTENT_SYM)},
{ "PHASE", SYM(PHASE_SYM)},
{ "PLUGIN", SYM(PLUGIN_SYM)},
{ "PLUGINS", SYM(PLUGINS_SYM)},
@@ -478,6 +484,7 @@ static SYMBOL symbols[] = {
{ "SIGNED", SYM(SIGNED_SYM)},
{ "SIMPLE", SYM(SIMPLE_SYM)},
{ "SLAVE", SYM(SLAVE)},
+ { "SLOW", SYM(SLOW_SYM)},
{ "SNAPSHOT", SYM(SNAPSHOT_SYM)},
{ "SMALLINT", SYM(SMALLINT)},
{ "SOCKET", SYM(SOCKET_SYM)},
@@ -526,6 +533,7 @@ static SYMBOL symbols[] = {
{ "TABLE", SYM(TABLE_SYM)},
{ "TABLES", SYM(TABLES)},
{ "TABLESPACE", SYM(TABLESPACE)},
+ { "TABLE_STATISTICS", SYM(TABLE_STATS_SYM)},
{ "TABLE_CHECKSUM", SYM(TABLE_CHECKSUM_SYM)},
{ "TEMPORARY", SYM(TEMPORARY)},
{ "TEMPTABLE", SYM(TEMPTABLE_SYM)},
@@ -569,6 +577,7 @@ static SYMBOL symbols[] = {
{ "USE", SYM(USE_SYM)},
{ "USER", SYM(USER)},
{ "USER_RESOURCES", SYM(RESOURCES)},
+ { "USER_STATISTICS", SYM(USER_STATS_SYM)},
{ "USE_FRM", SYM(USE_FRM)},
{ "USING", SYM(USING)},
{ "UTC_DATE", SYM(UTC_DATE_SYM)},
@@ -581,13 +590,14 @@ static SYMBOL symbols[] = {
{ "VARCHARACTER", SYM(VARCHAR)},
{ "VARIABLES", SYM(VARIABLES)},
{ "VARYING", SYM(VARYING)},
+ { "VIEW", SYM(VIEW_SYM)},
+ { "VIRTUAL", SYM(VIRTUAL_SYM)},
{ "WAIT", SYM(WAIT_SYM)},
{ "WARNINGS", SYM(WARNINGS)},
{ "WEEK", SYM(WEEK_SYM)},
{ "WHEN", SYM(WHEN_SYM)},
{ "WHERE", SYM(WHERE)},
{ "WHILE", SYM(WHILE_SYM)},
- { "VIEW", SYM(VIEW_SYM)},
{ "WITH", SYM(WITH)},
{ "WORK", SYM(WORK_SYM)},
{ "WRAPPER", SYM(WRAPPER_SYM)},
diff --git a/sql/log.cc b/sql/log.cc
index 9ed594bfb36..4b8efc45cb0 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -824,6 +824,13 @@ void Log_to_file_event_handler::flush()
mysql_slow_log.reopen_file();
}
+void Log_to_file_event_handler::flush_slow_log()
+{
+ /* reopen slow log file */
+ if (opt_slow_log)
+ mysql_slow_log.reopen_file();
+}
+
/*
Log error with all enabled log event handlers
@@ -919,8 +926,6 @@ void LOGGER::init_log_tables()
bool LOGGER::flush_logs(THD *thd)
{
- int rc= 0;
-
/*
Now we lock logger, as nobody should be able to use logging routines while
log tables are closed
@@ -932,7 +937,24 @@ bool LOGGER::flush_logs(THD *thd)
/* end of log flush */
logger.unlock();
- return rc;
+ return 0;
+}
+
+
+bool LOGGER::flush_slow_log(THD *thd)
+{
+ /*
+    Lock the logger exclusively, as nobody should be able to use the logging
+    routines while the slow log file is being reopened
+ */
+ logger.lock_exclusive();
+
+ /* reopen log files */
+ file_log_handler->flush_slow_log();
+
+ /* end of log flush */
+ logger.unlock();
+ return 0;
}
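
A hedged sketch of the intended use: the caller sits in the FLUSH handling
path, which is outside this excerpt, so the wrapper below is illustrative
only.

    /* Reopen only the slow query log, leaving the general and binary logs
       untouched; the exclusive logger lock is taken inside flush_slow_log(). */
    static void rotate_slow_log(THD *thd)
    {
      logger.flush_slow_log(thd);
    }
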
@@ -4090,6 +4112,7 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
if (likely(is_open()))
{
IO_CACHE *file= &log_file;
+ my_off_t my_org_b_tell;
#ifdef HAVE_REPLICATION
/*
In the future we need to add to the following if tests like
@@ -4105,6 +4128,8 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
}
#endif /* HAVE_REPLICATION */
+ my_org_b_tell= my_b_tell(file);
+
#if defined(USING_TRANSACTIONS)
/*
Should we write to the binlog cache or to the binlog on disk?
@@ -4115,7 +4140,7 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
trans/non-trans table types the best possible in binlogging)
- or if the event asks for it (cache_stmt == TRUE).
*/
- if (opt_using_transactions && thd)
+ if (opt_using_transactions)
{
if (thd->binlog_setup_trx_data())
goto err;
@@ -4157,7 +4182,6 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
If row-based binlogging, Insert_id, Rand and other kind of "setting
context" events are not needed.
*/
- if (thd)
{
if (!thd->current_stmt_binlog_row_based)
{
@@ -4204,16 +4228,16 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
}
}
- /*
- Write the SQL command
- */
-
+ /* Write the SQL command */
if (event_info->write(file) ||
DBUG_EVALUATE_IF("injecting_fault_writing", 1, 0))
goto err;
if (file == &log_file) // we are writing to the real log (disk)
{
+ ulonglong data_written= (my_b_tell(file) - my_org_b_tell);
+ status_var_add(thd->status_var.binlog_bytes_written, data_written);
+
if (flush_and_sync())
goto err;
signal_update();
@@ -4339,6 +4363,7 @@ uint MYSQL_BIN_LOG::next_file_id()
SYNOPSIS
write_cache()
+    thd            Current thread
cache Cache to write to the binary log
lock_log       True if the LOCK_log mutex should be acquired, false otherwise
sync_log True if the log should be flushed and sync:ed
@@ -4348,7 +4373,8 @@ uint MYSQL_BIN_LOG::next_file_id()
be reset as a READ_CACHE to be able to read the contents from it.
*/
-int MYSQL_BIN_LOG::write_cache(IO_CACHE *cache, bool lock_log, bool sync_log)
+int MYSQL_BIN_LOG::write_cache(THD *thd, IO_CACHE *cache, bool lock_log,
+ bool sync_log)
{
Mutex_sentry sentry(lock_log ? &LOCK_log : NULL);
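
For callers the sequence is unchanged apart from the extra THD argument: put
the statement/transaction cache into read mode, then hand it to write_cache()
so the bytes written can be charged to the session counters.  A minimal sketch
(the helper name is illustrative; the error code is the one used elsewhere in
this file):

    static int copy_cache_to_binlog(THD *thd, IO_CACHE *cache)
    {
      /* rewind the cache and switch it into read mode */
      if (reinit_io_cache(cache, READ_CACHE, 0L, FALSE, FALSE))
        return ER_ERROR_ON_WRITE;
      /* write_cache() adds the bytes written to
         thd->status_var.binlog_bytes_written */
      return mysql_bin_log.write_cache(thd, cache, FALSE, FALSE);
    }
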
@@ -4396,6 +4422,7 @@ int MYSQL_BIN_LOG::write_cache(IO_CACHE *cache, bool lock_log, bool sync_log)
/* write the first half of the split header */
if (my_b_write(&log_file, header, carry))
return ER_ERROR_ON_WRITE;
+ status_var_add(thd->status_var.binlog_bytes_written, carry);
/*
copy fixed second half of header to cache so the correct
@@ -4464,6 +4491,8 @@ int MYSQL_BIN_LOG::write_cache(IO_CACHE *cache, bool lock_log, bool sync_log)
/* Write data to the binary log file */
if (my_b_write(&log_file, cache->read_pos, length))
return ER_ERROR_ON_WRITE;
+ status_var_add(thd->status_var.binlog_bytes_written, length);
+
cache->read_pos=cache->read_end; // Mark buffer used up
} while ((length= my_b_fill(cache)));
@@ -4515,6 +4544,8 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd, bool lock)
if (lock)
pthread_mutex_lock(&LOCK_log);
ev.write(&log_file);
+ status_var_add(thd->status_var.binlog_bytes_written, ev.data_written);
+
if (lock)
{
if (!error && !(error= flush_and_sync()))
@@ -4586,21 +4617,28 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event,
*/
if (qinfo.write(&log_file))
goto err;
+ status_var_add(thd->status_var.binlog_bytes_written, qinfo.data_written);
DBUG_EXECUTE_IF("crash_before_writing_xid",
{
- if ((write_error= write_cache(cache, false, true)))
+ if ((write_error= write_cache(thd, cache, FALSE,
+ TRUE)))
DBUG_PRINT("info", ("error writing binlog cache: %d",
write_error));
DBUG_PRINT("info", ("crashing before writing xid"));
abort();
});
- if ((write_error= write_cache(cache, false, false)))
+ if ((write_error= write_cache(thd, cache, FALSE, FALSE)))
goto err;
- if (commit_event && commit_event->write(&log_file))
- goto err;
+ if (commit_event)
+ {
+ if (commit_event->write(&log_file))
+ goto err;
+ status_var_add(thd->status_var.binlog_bytes_written,
+ commit_event->data_written);
+ }
if (incident && write_incident(thd, FALSE))
goto err;
diff --git a/sql/log.h b/sql/log.h
index d306d6f7182..8c816cfda39 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -359,7 +359,8 @@ public:
bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event, bool incident);
bool write_incident(THD *thd, bool lock);
- int write_cache(IO_CACHE *cache, bool lock_log, bool flush_and_sync);
+ int write_cache(THD *thd, IO_CACHE *cache, bool lock_log,
+ bool flush_and_sync);
void set_write_error(THD *thd);
bool check_write_error(THD *thd);
@@ -487,6 +488,7 @@ public:
const char *sql_text, uint sql_text_len,
CHARSET_INFO *client_cs);
void flush();
+ void flush_slow_log();
void init_pthread_objects();
MYSQL_QUERY_LOG *get_mysql_slow_log() { return &mysql_slow_log; }
MYSQL_QUERY_LOG *get_mysql_log() { return &mysql_log; }
@@ -531,6 +533,7 @@ public:
void init_base();
void init_log_tables();
bool flush_logs(THD *thd);
+ bool flush_slow_log(THD *thd);
/* Perform basic logger cleanup. this will leave e.g. error log open. */
void cleanup_base();
/* Free memory. Nothing could be logged after this function is called */
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 03f8c9afb54..80a6c2585cf 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1121,7 +1121,7 @@ failed my_b_read"));
goto err;
}
if ((res= read_log_event(buf, data_len, &error, description_event)))
- res->register_temp_buf(buf);
+ res->register_temp_buf(buf, TRUE);
err:
UNLOCK_MUTEX;
@@ -4510,7 +4510,7 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli,
*/
lex_start(thd);
thd->lex->local_file= local_fname;
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, 0);
if (!use_rli_only_for_errors)
{
@@ -6306,7 +6306,7 @@ int Append_block_log_event::do_apply_event(Relay_log_info const *rli)
as the present method does not call mysql_parse().
*/
lex_start(thd);
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, 0);
my_delete(fname, MYF(0)); // old copy may exist already
if ((fd= my_create(fname, CREATE_MODE,
O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
@@ -7246,7 +7246,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
we need to do any changes to that value after this function.
*/
lex_start(thd);
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, 0);
/*
The current statement is just about to begin and
has not yet modified anything. Note, all.modified is reset
@@ -8044,6 +8044,111 @@ Table_map_log_event::~Table_map_log_event()
my_free(m_memory, MYF(MY_ALLOW_ZERO_PTR));
}
+
+#ifdef MYSQL_CLIENT
+
+/*
+  Rewrite the database name of the event to the name specified by new_db
+  SYNOPSIS
+    rewrite_db()
+    new_db    Database name to change to
+    new_len   Length of new_db
+    desc      Event describing the binlog that we're writing to.
+
+ DESCRIPTION
+    Reset the db name. This function assumes that the temp_buf member
+    contains the event representation taken from a binary log. It resets
+    m_dbnam and m_dblen and rewrites temp_buf with the new db name.
+
+ RETURN
+ 0 - Success
+ other - Error
+*/
+
+int Table_map_log_event::rewrite_db(const char* new_db, size_t new_len,
+ const Format_description_log_event* desc)
+{
+ DBUG_ENTER("Table_map_log_event::rewrite_db");
+ DBUG_ASSERT(temp_buf);
+
+ uint header_len= min(desc->common_header_len,
+ LOG_EVENT_MINIMAL_HEADER_LEN) + TABLE_MAP_HEADER_LEN;
+ int len_diff;
+
+ if (!(len_diff= new_len - m_dblen))
+ {
+ memcpy((void*) (temp_buf + header_len + 1), new_db, m_dblen + 1);
+ memcpy((void*) m_dbnam, new_db, m_dblen + 1);
+ DBUG_RETURN(0);
+ }
+
+ // Create new temp_buf
+ ulong event_cur_len= uint4korr(temp_buf + EVENT_LEN_OFFSET);
+ ulong event_new_len= event_cur_len + len_diff;
+ char* new_temp_buf= (char*) my_malloc(event_new_len, MYF(MY_WME));
+
+ if (!new_temp_buf)
+ {
+ sql_print_error("Table_map_log_event::rewrite_db: "
+ "failed to allocate new temp_buf (%d bytes required)",
+ event_new_len);
+ DBUG_RETURN(-1);
+ }
+
+ // Rewrite temp_buf
+ char* ptr= new_temp_buf;
+ ulong cnt= 0;
+
+ // Copy header and change event length
+ memcpy(ptr, temp_buf, header_len);
+ int4store(ptr + EVENT_LEN_OFFSET, event_new_len);
+ ptr += header_len;
+ cnt += header_len;
+
+ // Write new db name length and new name
+ *ptr++ = new_len;
+ memcpy(ptr, new_db, new_len + 1);
+ ptr += new_len + 1;
+ cnt += m_dblen + 2;
+
+  // Copy the rest of the old event
+ memcpy(ptr, temp_buf + cnt, event_cur_len - cnt);
+
+ // Reregister temp buf
+ free_temp_buf();
+ register_temp_buf(new_temp_buf, TRUE);
+
+ // Reset m_dbnam and m_dblen members
+ m_dblen= new_len;
+
+ // m_dbnam resides in m_memory together with m_tblnam and m_coltype
+ uchar* memory= m_memory;
+ char const* tblnam= m_tblnam;
+ uchar* coltype= m_coltype;
+
+ m_memory= (uchar*) my_multi_malloc(MYF(MY_WME),
+ &m_dbnam, (uint) m_dblen + 1,
+ &m_tblnam, (uint) m_tbllen + 1,
+ &m_coltype, (uint) m_colcnt,
+ NullS);
+
+ if (!m_memory)
+ {
+ sql_print_error("Table_map_log_event::rewrite_db: "
+ "failed to allocate new m_memory (%d + %d + %d bytes required)",
+ m_dblen + 1, m_tbllen + 1, m_colcnt);
+ DBUG_RETURN(-1);
+ }
+
+ memcpy((void*)m_dbnam, new_db, m_dblen + 1);
+ memcpy((void*)m_tblnam, tblnam, m_tbllen + 1);
+ memcpy(m_coltype, coltype, m_colcnt);
+
+ my_free(memory, MYF(MY_WME));
+ DBUG_RETURN(0);
+}
+#endif /* MYSQL_CLIENT */
+
+
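
A worked example of the length bookkeeping above (figures illustrative only):
with m_dblen = 4 and new_len = 8, len_diff is 4 and event_new_len =
event_cur_len + 4.  The db-name section of the old buffer at
temp_buf + header_len occupies 1 + m_dblen + 1 = 6 bytes (length byte, name,
terminating NUL), which is why cnt advances by m_dblen + 2, while the new
buffer receives 1 + new_len + 1 = 10 bytes at the same spot; the remainder of
the old event is then copied verbatim starting at offset cnt.
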
/*
Return value is an error code, one of:
@@ -8515,7 +8620,7 @@ Rows_log_event::write_row(const Relay_log_info *const rli,
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
DBUG_PRINT("info",("Locating offending record using rnd_pos()"));
- error= table->file->rnd_pos(table->record[1], table->file->dup_ref);
+ error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
if (error)
{
DBUG_PRINT("info",("rnd_pos() returns error %d",error));
@@ -8547,10 +8652,10 @@ Rows_log_event::write_row(const Relay_log_info *const rli,
key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
0);
- error= table->file->index_read_idx_map(table->record[1], keynum,
- (const uchar*)key.get(),
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT);
+ error= table->file->ha_index_read_idx_map(table->record[1], keynum,
+ (const uchar*)key.get(),
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT);
if (error)
{
DBUG_PRINT("info",("index_read_idx() returns %s", HA_ERR(error)));
@@ -8818,13 +8923,14 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
length. Something along these lines should work:
ADD>>> store_record(table,record[1]);
- int error= table->file->rnd_pos(table->record[0], table->file->ref);
+ int error= table->file->ha_rnd_pos(table->record[0],
+ table->file->ref);
ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0],
table->s->reclength) == 0);
*/
DBUG_PRINT("info",("locating record using primary key (position)"));
- int error= table->file->rnd_pos_by_record(table->record[0]);
+ int error= table->file->ha_rnd_pos_by_record(table->record[0]);
if (error)
{
DBUG_PRINT("info",("rnd_pos returns error %d",error));
@@ -8884,9 +8990,9 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
table->record[0][table->s->null_bytes - 1]|=
256U - (1U << table->s->last_null_bit_pos);
- if ((error= table->file->index_read_map(table->record[0], m_key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT)))
+ if ((error= table->file->ha_index_read_map(table->record[0], m_key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT)))
{
DBUG_PRINT("info",("no record matching the key found in the table"));
if (error == HA_ERR_RECORD_DELETED)
@@ -8948,7 +9054,7 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
256U - (1U << table->s->last_null_bit_pos);
}
- while ((error= table->file->index_next(table->record[0])))
+ while ((error= table->file->ha_index_next(table->record[0])))
{
/* We just skip records that has already been deleted */
if (error == HA_ERR_RECORD_DELETED)
@@ -8984,7 +9090,7 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
do
{
restart_rnd_next:
- error= table->file->rnd_next(table->record[0]);
+ error= table->file->ha_rnd_next(table->record[0]);
DBUG_PRINT("info", ("error: %s", HA_ERR(error)));
switch (error) {
diff --git a/sql/log_event.h b/sql/log_event.h
index 3c59043aa53..e4bc699b9ca 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -873,6 +873,13 @@ public:
event's type, and its content is distributed in the event-specific fields.
*/
char *temp_buf;
+
+ /*
+ TRUE <=> this event 'owns' temp_buf and should call my_free() when done
+ with it
+ */
+ bool event_owns_temp_buf;
+
/*
Timestamp on the master(for debugging and replication of
NOW()/TIMESTAMP). It is important for queries and LOAD DATA
@@ -1014,12 +1021,17 @@ public:
Log_event(const char* buf, const Format_description_log_event
*description_event);
virtual ~Log_event() { free_temp_buf();}
- void register_temp_buf(char* buf) { temp_buf = buf; }
+ void register_temp_buf(char* buf, bool must_free)
+ {
+ temp_buf= buf;
+ event_owns_temp_buf= must_free;
+ }
void free_temp_buf()
{
if (temp_buf)
{
- my_free(temp_buf, MYF(0));
+ if (event_owns_temp_buf)
+ my_free(temp_buf, MYF(0));
temp_buf = 0;
}
}
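
The flag simply records who owns the buffer.  A minimal usage sketch (variable
names are illustrative):

    /* the event received its own copy and must free it on destruction */
    ev->register_temp_buf(event_buf, TRUE);

    /* the event only aliases memory owned by the caller, e.g. a read buffer
       that outlives the event, and must not free it */
    ev->register_temp_buf(caller_buf, FALSE);
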
@@ -3310,6 +3322,8 @@ public:
ulong get_table_id() const { return m_table_id; }
const char *get_table_name() const { return m_tblnam; }
const char *get_db_name() const { return m_dbnam; }
+ int rewrite_db(const char* new_name, size_t new_name_len,
+ const Format_description_log_event*);
#endif
virtual Log_event_type get_type_code() { return TABLE_MAP_EVENT; }
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 040b71b94e0..2ba76c45f85 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -63,7 +63,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
we need to do any changes to that value after this function.
*/
lex_start(thd);
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, 0);
/*
Check if the slave is set to use SBR. If so, it should switch
@@ -553,7 +553,7 @@ replace_record(THD *thd, TABLE *table,
*/
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
- error= table->file->rnd_pos(table->record[1], table->file->dup_ref);
+ error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
if (error)
{
DBUG_PRINT("info",("rnd_pos() returns error %d",error));
@@ -579,10 +579,10 @@ replace_record(THD *thd, TABLE *table,
key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
0);
- error= table->file->index_read_idx_map(table->record[1], keynum,
- (const uchar*)key.get(),
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT);
+ error= table->file->ha_index_read_idx_map(table->record[1], keynum,
+ (const uchar*)key.get(),
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT);
if (error)
{
DBUG_PRINT("info", ("index_read_idx() returns error %d", error));
@@ -694,13 +694,13 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
length. Something along these lines should work:
ADD>>> store_record(table,record[1]);
- int error= table->file->rnd_pos(table->record[0], table->file->ref);
+ int error= table->file->ha_rnd_pos(table->record[0], table->file->ref);
ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0],
table->s->reclength) == 0);
*/
table->file->position(table->record[0]);
- int error= table->file->rnd_pos(table->record[0], table->file->ref);
+ int error= table->file->ha_rnd_pos(table->record[0], table->file->ref);
/*
rnd_pos() returns the record in table->record[0], so we have to
move it to table->record[1].
@@ -738,8 +738,9 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
my_ptrdiff_t const pos=
table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
table->record[1][pos]= 0xFF;
- if ((error= table->file->index_read_map(table->record[1], key, HA_WHOLE_KEY,
- HA_READ_KEY_EXACT)))
+ if ((error= table->file->ha_index_read_map(table->record[1], key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT)))
{
table->file->print_error(error, MYF(0));
table->file->ha_index_end();
@@ -793,7 +794,7 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
256U - (1U << table->s->last_null_bit_pos);
}
- while ((error= table->file->index_next(table->record[1])))
+ while ((error= table->file->ha_index_next(table->record[1])))
{
/* We just skip records that has already been deleted */
if (error == HA_ERR_RECORD_DELETED)
@@ -822,7 +823,7 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
do
{
restart_rnd_next:
- error= table->file->rnd_next(table->record[1]);
+ error= table->file->ha_rnd_next(table->record[1]);
DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
DBUG_DUMP("record[1]", table->record[1], table->s->reclength);
@@ -2111,7 +2112,7 @@ Old_rows_log_event::write_row(const Relay_log_info *const rli,
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
DBUG_PRINT("info",("Locating offending record using rnd_pos()"));
- error= table->file->rnd_pos(table->record[1], table->file->dup_ref);
+ error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
if (error)
{
DBUG_PRINT("info",("rnd_pos() returns error %d",error));
@@ -2143,10 +2144,10 @@ Old_rows_log_event::write_row(const Relay_log_info *const rli,
key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
0);
- error= table->file->index_read_idx_map(table->record[1], keynum,
- (const uchar*)key.get(),
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT);
+ error= table->file->ha_index_read_idx_map(table->record[1], keynum,
+ (const uchar*)key.get(),
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT);
if (error)
{
DBUG_PRINT("info",("index_read_idx() returns error %d", error));
@@ -2297,13 +2298,13 @@ int Old_rows_log_event::find_row(const Relay_log_info *rli)
length. Something along these lines should work:
ADD>>> store_record(table,record[1]);
- int error= table->file->rnd_pos(table->record[0], table->file->ref);
+ int error= table->file->ha_rnd_pos(table->record[0], table->file->ref);
ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0],
table->s->reclength) == 0);
*/
DBUG_PRINT("info",("locating record using primary key (position)"));
- int error= table->file->rnd_pos_by_record(table->record[0]);
+ int error= table->file->ha_rnd_pos_by_record(table->record[0]);
if (error)
{
DBUG_PRINT("info",("rnd_pos returns error %d",error));
@@ -2363,9 +2364,9 @@ int Old_rows_log_event::find_row(const Relay_log_info *rli)
table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
table->record[0][pos]= 0xFF;
- if ((error= table->file->index_read_map(table->record[0], m_key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT)))
+ if ((error= table->file->ha_index_read_map(table->record[0], m_key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT)))
{
DBUG_PRINT("info",("no record matching the key found in the table"));
if (error == HA_ERR_RECORD_DELETED)
@@ -2427,7 +2428,7 @@ int Old_rows_log_event::find_row(const Relay_log_info *rli)
256U - (1U << table->s->last_null_bit_pos);
}
- while ((error= table->file->index_next(table->record[0])))
+ while ((error= table->file->ha_index_next(table->record[0])))
{
/* We just skip records that has already been deleted */
if (error == HA_ERR_RECORD_DELETED)
@@ -2463,7 +2464,7 @@ int Old_rows_log_event::find_row(const Relay_log_info *rli)
do
{
restart_rnd_next:
- error= table->file->rnd_next(table->record[0]);
+ error= table->file->ha_rnd_next(table->record[0]);
switch (error) {
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 2af7654e381..e335712a3f7 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -92,12 +92,16 @@ extern MYSQL_PLUGIN_IMPORT const char *primary_key_name;
#include "unireg.h"
void init_sql_alloc(MEM_ROOT *root, uint block_size, uint pre_alloc_size);
+#endif // MYSQL_CLIENT
+
void *sql_alloc(size_t);
void *sql_calloc(size_t);
char *sql_strdup(const char *str);
char *sql_strmake(const char *str, size_t len);
void *sql_memdup(const void * ptr, size_t size);
void sql_element_free(void *ptr);
+
+#ifndef MYSQL_CLIENT
char *sql_strmake_with_convert(const char *str, size_t arg_length,
CHARSET_INFO *from_cs,
size_t max_res_length,
@@ -1063,6 +1067,7 @@ bool setup_connection_thread_globals(THD *thd);
bool login_connection(THD *thd);
void end_connection(THD *thd);
void prepare_new_connection_state(THD* thd);
+void update_global_user_stats(THD* thd, bool create_user, time_t now);
int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent);
bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create);
@@ -1099,14 +1104,22 @@ bool is_update_query(enum enum_sql_command command);
bool is_log_table_write_query(enum enum_sql_command command);
bool alloc_query(THD *thd, const char *packet, uint packet_length);
void mysql_init_select(LEX *lex);
-void mysql_reset_thd_for_next_command(THD *thd);
+void mysql_reset_thd_for_next_command(THD *thd, my_bool calculate_userstat);
bool mysql_new_select(LEX *lex, bool move_down);
void create_select_for_variable(const char *var_name);
void mysql_init_multi_delete(LEX *lex);
bool multi_delete_set_locks_and_link_aux_tables(LEX *lex);
void init_max_user_conn(void);
void init_update_queries(void);
+void init_global_user_stats(void);
+void init_global_table_stats(void);
+void init_global_index_stats(void);
+void init_global_client_stats(void);
void free_max_user_conn(void);
+void free_global_user_stats(void);
+void free_global_table_stats(void);
+void free_global_index_stats(void);
+void free_global_client_stats(void);
pthread_handler_t handle_bootstrap(void *arg);
int mysql_execute_command(THD *thd);
bool do_command(THD *thd);
@@ -1349,6 +1362,7 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, uint length,
bool allow_rowid, uint *cached_field_index_ptr);
Field *
find_field_in_table_sef(TABLE *table, const char *name);
+int update_virtual_fields(TABLE *table, bool ignore_stored= FALSE);
#endif /* MYSQL_SERVER */
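
update_virtual_fields() is the server-side hook for recomputing virtual
column values; the expected call pattern (an assumption here, since the call
sites live in the row-read paths and are not part of this hunk) is to invoke
it right after a row has been fetched into record[0]:

    if (!(error= table->file->ha_rnd_next(table->record[0])))
      update_virtual_fields(table);    /* ignore_stored defaults to FALSE */
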
@@ -1469,14 +1483,16 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum enum_field_types t
LEX_STRING *comment,
char *change, List<String> *interval_list,
CHARSET_INFO *cs,
- uint uint_geom_type);
+ uint uint_geom_type,
+ Virtual_column_info *vcol_info);
Create_field * new_create_field(THD *thd, char *field_name, enum_field_types type,
char *length, char *decimals,
uint type_modifier,
Item *default_value, Item *on_update_value,
LEX_STRING *comment, char *change,
List<String> *interval_list, CHARSET_INFO *cs,
- uint uint_geom_type);
+ uint uint_geom_type,
+ Virtual_column_info *vcol_info);
void store_position_for_column(const char *name);
bool add_to_list(THD *thd, SQL_LIST &list,Item *group,bool asc);
bool push_new_name_resolution_context(THD *thd,
@@ -1967,6 +1983,7 @@ extern ulong max_connect_errors, connect_timeout;
extern ulong extra_max_connections;
extern ulong slave_net_timeout, slave_trans_retries;
extern uint max_user_connections;
+extern ulonglong denied_connections;
extern ulong what_to_log,flush_time;
extern ulong query_buff_size;
extern ulong max_prepared_stmt_count, prepared_stmt_count;
@@ -2020,6 +2037,7 @@ extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap;
extern my_bool opt_slave_compressed_protocol, use_temp_pool;
extern ulong slave_exec_mode_options;
extern my_bool opt_readonly, lower_case_file_system;
+extern my_bool opt_userstat_running;
extern my_bool opt_enable_named_pipe, opt_sync_frm, opt_allow_suspicious_udfs;
extern my_bool opt_secure_auth;
extern char* opt_secure_file_priv;
@@ -2060,6 +2078,11 @@ extern pthread_mutex_t LOCK_des_key_file;
#endif
extern pthread_mutex_t LOCK_server_started;
extern pthread_cond_t COND_server_started;
+extern pthread_mutex_t LOCK_global_user_client_stats;
+extern pthread_mutex_t LOCK_global_table_stats;
+extern pthread_mutex_t LOCK_global_index_stats;
+extern pthread_mutex_t LOCK_stats;
+
extern int mysqld_server_started;
extern rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
extern rw_lock_t LOCK_system_variables_hash;
@@ -2086,6 +2109,11 @@ extern KNOWN_DATE_TIME_FORMAT known_date_time_formats[];
extern String null_string;
extern HASH open_cache, lock_db_cache;
+extern HASH global_user_stats;
+extern HASH global_client_stats;
+extern HASH global_table_stats;
+extern HASH global_index_stats;
+
extern TABLE *unused_tables;
extern const char* any_db;
extern struct my_option my_long_options[];
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 61a7c16cd90..750a221a785 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -417,6 +417,7 @@ static pthread_cond_t COND_thread_cache, COND_flush_thread_cache;
bool opt_update_log, opt_bin_log, opt_ignore_builtin_innodb= 0;
my_bool opt_log, opt_slow_log;
+my_bool opt_userstat_running;
ulong log_output_options;
my_bool opt_log_queries_not_using_indexes= 0;
bool opt_error_log= IF_WIN(1,0);
@@ -552,6 +553,7 @@ ulong binlog_cache_use= 0, binlog_cache_disk_use= 0;
ulong max_connections, max_connect_errors;
ulong extra_max_connections;
uint max_user_connections= 0;
+ulonglong denied_connections;
/**
Limit of the total number of prepared statements in the server.
Is necessary to protect the server against out-of-memory attacks.
@@ -653,6 +655,9 @@ pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count,
LOCK_global_system_variables,
LOCK_user_conn, LOCK_slave_list, LOCK_active_mi,
LOCK_connection_count, LOCK_uuid_generator;
+pthread_mutex_t LOCK_stats, LOCK_global_user_client_stats;
+pthread_mutex_t LOCK_global_table_stats, LOCK_global_index_stats;
+
/**
The below lock protects access to two global server variables:
max_prepared_stmt_count and prepared_stmt_count. These variables
@@ -1358,6 +1363,10 @@ void clean_up(bool print_message)
x_free(opt_secure_file_priv);
bitmap_free(&temp_pool);
free_max_user_conn();
+ free_global_user_stats();
+ free_global_client_stats();
+ free_global_table_stats();
+ free_global_index_stats();
#ifdef HAVE_REPLICATION
end_slave_list();
#endif
@@ -1448,6 +1457,11 @@ static void clean_up_mutexes()
(void) pthread_mutex_destroy(&LOCK_bytes_received);
(void) pthread_mutex_destroy(&LOCK_user_conn);
(void) pthread_mutex_destroy(&LOCK_connection_count);
+ (void) pthread_mutex_destroy(&LOCK_stats);
+ (void) pthread_mutex_destroy(&LOCK_global_user_client_stats);
+ (void) pthread_mutex_destroy(&LOCK_global_table_stats);
+ (void) pthread_mutex_destroy(&LOCK_global_index_stats);
+
Events::destroy_mutexes();
#ifdef HAVE_OPENSSL
(void) pthread_mutex_destroy(&LOCK_des_key_file);
@@ -3228,6 +3242,7 @@ SHOW_VAR com_status_vars[]= {
{"show_binlog_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOG_EVENTS]), SHOW_LONG_STATUS},
{"show_binlogs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOGS]), SHOW_LONG_STATUS},
{"show_charsets", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CHARSETS]), SHOW_LONG_STATUS},
+ {"show_client_statistics", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CLIENT_STATS]), SHOW_LONG_STATUS},
{"show_collations", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLLATIONS]), SHOW_LONG_STATUS},
{"show_column_types", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLUMN_TYPES]), SHOW_LONG_STATUS},
{"show_contributors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CONTRIBUTORS]), SHOW_LONG_STATUS},
@@ -3250,6 +3265,7 @@ SHOW_VAR com_status_vars[]= {
{"show_function_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS_FUNC]), SHOW_LONG_STATUS},
{"show_grants", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS},
{"show_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS},
+ {"show_index_statistics", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_INDEX_STATS]), SHOW_LONG_STATUS},
{"show_master_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS},
{"show_new_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS},
{"show_open_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS},
@@ -3266,9 +3282,11 @@ SHOW_VAR com_status_vars[]= {
{"show_slave_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_STAT]), SHOW_LONG_STATUS},
{"show_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS]), SHOW_LONG_STATUS},
{"show_storage_engines", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STORAGE_ENGINES]), SHOW_LONG_STATUS},
+ {"show_table_statistics", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLE_STATS]), SHOW_LONG_STATUS},
{"show_table_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLE_STATUS]), SHOW_LONG_STATUS},
{"show_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLES]), SHOW_LONG_STATUS},
{"show_triggers", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TRIGGERS]), SHOW_LONG_STATUS},
+ {"show_user_statistics", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_USER_STATS]), SHOW_LONG_STATUS},
{"show_variables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS},
{"show_warnings", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), SHOW_LONG_STATUS},
{"slave_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS},
@@ -3673,6 +3691,12 @@ static int init_thread_environment()
(void) pthread_mutex_init(&LOCK_prepared_stmt_count, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_uuid_generator, MY_MUTEX_INIT_FAST);
(void) pthread_mutex_init(&LOCK_connection_count, MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_stats, MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_global_user_client_stats,
+ MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_global_table_stats, MY_MUTEX_INIT_FAST);
+ (void) pthread_mutex_init(&LOCK_global_index_stats, MY_MUTEX_INIT_FAST);
+
#ifdef HAVE_OPENSSL
(void) pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST);
#ifndef HAVE_YASSL
@@ -4037,6 +4061,9 @@ server.");
/* call ha_init_key_cache() on all key caches to init them */
process_key_caches(&ha_init_key_cache);
+ init_global_table_stats();
+ init_global_index_stats();
+
/* Allow storage engine to give real error messages */
if (ha_init_errors())
DBUG_RETURN(1);
@@ -4242,6 +4269,8 @@ server.");
init_max_user_conn();
init_update_queries();
+ init_global_user_stats();
+ init_global_client_stats();
DBUG_RETURN(0);
}
@@ -5060,6 +5089,7 @@ static void create_new_thread(THD *thd)
DBUG_PRINT("error",("Too many connections"));
close_connection(thd, ER_CON_COUNT_ERROR, 1);
+ statistic_increment(denied_connections, &LOCK_status);
delete thd;
DBUG_VOID_RETURN;
}
@@ -5862,6 +5892,7 @@ enum options_mysqld
OPT_LOG_SLOW_RATE_LIMIT,
OPT_LOG_SLOW_VERBOSITY,
OPT_LOG_SLOW_FILTER,
+ OPT_USERSTAT,
OPT_GENERAL_LOG_FILE,
OPT_SLOW_QUERY_LOG_FILE,
OPT_IGNORE_BUILTIN_INNODB
@@ -7271,6 +7302,10 @@ The minimum value for this variable is 4096.",
(uchar**) &max_system_variables.net_wait_timeout, 0, GET_ULONG,
REQUIRED_ARG, NET_WAIT_TIMEOUT, 1, IF_WIN(INT_MAX32/1000, LONG_TIMEOUT),
0, 1, 0},
+ {"userstat", OPT_USERSTAT,
+ "Control USER_STATISTICS, CLIENT_STATISTICS, INDEX_STATISTICS and TABLE_STATISTICS running",
+ (uchar**) &opt_userstat_running, (uchar**) &opt_userstat_running,
+ 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0},
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -7641,19 +7676,24 @@ static int show_ssl_get_cipher_list(THD *thd, SHOW_VAR *var, char *buff)
SHOW_VAR status_vars[]= {
{"Aborted_clients", (char*) &aborted_threads, SHOW_LONG},
{"Aborted_connects", (char*) &aborted_connects, SHOW_LONG},
+ {"Access_denied_errors", (char*) offsetof(STATUS_VAR, access_denied_errors), SHOW_LONG_STATUS},
{"Binlog_cache_disk_use", (char*) &binlog_cache_disk_use, SHOW_LONG},
{"Binlog_cache_use", (char*) &binlog_cache_use, SHOW_LONG},
+ {"Busy_time", (char*) offsetof(STATUS_VAR, busy_time), SHOW_DOUBLE_STATUS},
{"Bytes_received", (char*) offsetof(STATUS_VAR, bytes_received), SHOW_LONGLONG_STATUS},
{"Bytes_sent", (char*) offsetof(STATUS_VAR, bytes_sent), SHOW_LONGLONG_STATUS},
+ {"Binlog_bytes_written", (char*) offsetof(STATUS_VAR, binlog_bytes_written), SHOW_LONGLONG_STATUS},
{"Com", (char*) com_status_vars, SHOW_ARRAY},
{"Compression", (char*) &show_net_compression, SHOW_FUNC},
{"Connections", (char*) &thread_id, SHOW_LONG_NOFLUSH},
+ {"Cpu_time", (char*) offsetof(STATUS_VAR, cpu_time), SHOW_DOUBLE_STATUS},
{"Created_tmp_disk_tables", (char*) offsetof(STATUS_VAR, created_tmp_disk_tables), SHOW_LONG_STATUS},
{"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG},
{"Created_tmp_tables", (char*) offsetof(STATUS_VAR, created_tmp_tables), SHOW_LONG_STATUS},
{"Delayed_errors", (char*) &delayed_insert_errors, SHOW_LONG},
{"Delayed_insert_threads", (char*) &delayed_insert_threads, SHOW_LONG_NOFLUSH},
{"Delayed_writes", (char*) &delayed_insert_writes, SHOW_LONG},
+ {"Empty_queries", (char*) offsetof(STATUS_VAR, empty_queries), SHOW_LONG_STATUS},
{"Flush_commands", (char*) &refresh_version, SHOW_LONG_NOFLUSH},
{"Handler_commit", (char*) offsetof(STATUS_VAR, ha_commit_count), SHOW_LONG_STATUS},
{"Handler_delete", (char*) offsetof(STATUS_VAR, ha_delete_count), SHOW_LONG_STATUS},
@@ -7688,6 +7728,8 @@ SHOW_VAR status_vars[]= {
{"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS},
{"Opened_table_definitions", (char*) offsetof(STATUS_VAR, opened_shares), SHOW_LONG_STATUS},
{"Prepared_stmt_count", (char*) &show_prepared_stmt_count, SHOW_FUNC},
+ {"Rows_sent", (char*) offsetof(STATUS_VAR, rows_sent), SHOW_LONG_STATUS},
+ {"Rows_read", (char*) offsetof(STATUS_VAR, rows_read), SHOW_LONG_STATUS},
#ifdef HAVE_QUERY_CACHE
{"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_NOFLUSH},
{"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_NOFLUSH},
@@ -8256,6 +8298,7 @@ mysqld_get_one_option(int optid,
}
case (int)OPT_REPLICATE_REWRITE_DB:
{
+ /* See also OPT_REWRITE_DB handling in client/mysqlbinlog.cc */
char* key = argument,*p, *val;
if (!(p= strstr(argument, "->")))
@@ -9194,6 +9237,8 @@ void refresh_status(THD *thd)
/* Reset thread's status variables */
bzero((uchar*) &thd->status_var, sizeof(thd->status_var));
+ bzero((uchar*) &thd->org_status_var, sizeof(thd->org_status_var));
+ thd->start_bytes_received= 0;
/* Reset some global variables */
reset_status_vars();
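The new Rows_sent, Cpu_time and similar entries above rely on the {name, offsetof(STATUS_VAR, field), type} convention, where the display code adds the stored offset to a per-thread base pointer to find the value. A minimal standalone sketch of that technique follows; ThreadStatus, StatusEntry and print_status are invented names for illustration, not the server's SHOW_VAR machinery.

// Minimal sketch of an offsetof-based status table (illustration only;
// ThreadStatus, StatusEntry and print_status are hypothetical names).
#include <cstddef>
#include <cstdio>

struct ThreadStatus {
  unsigned long rows_sent;
  unsigned long rows_read;
  double busy_time;
};

enum EntryType { TYPE_ULONG, TYPE_DOUBLE };

struct StatusEntry {
  const char *name;
  size_t offset;      // offset of the field inside ThreadStatus
  EntryType type;
};

static const StatusEntry status_table[] = {
  {"Rows_sent", offsetof(ThreadStatus, rows_sent), TYPE_ULONG},
  {"Rows_read", offsetof(ThreadStatus, rows_read), TYPE_ULONG},
  {"Busy_time", offsetof(ThreadStatus, busy_time), TYPE_DOUBLE},
};

// Resolve each entry against a concrete per-thread struct and print it.
static void print_status(const ThreadStatus &st) {
  for (const StatusEntry &e : status_table) {
    const char *base = reinterpret_cast<const char *>(&st);
    if (e.type == TYPE_ULONG)
      std::printf("%s %lu\n", e.name,
                  *reinterpret_cast<const unsigned long *>(base + e.offset));
    else
      std::printf("%s %.6f\n", e.name,
                  *reinterpret_cast<const double *>(base + e.offset));
  }
}

int main() {
  ThreadStatus st = {42, 1000, 0.25};
  print_status(st);
}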
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index cff1b89179d..8d431b16c66 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -8342,7 +8342,7 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
/* We get here if we got the same row ref in all scans. */
if (need_to_fetch_row)
- error= head->file->rnd_pos(head->record[0], last_rowid);
+ error= head->file->ha_rnd_pos(head->record[0], last_rowid);
} while (error == HA_ERR_RECORD_DELETED);
DBUG_RETURN(error);
}
@@ -8408,7 +8408,7 @@ int QUICK_ROR_UNION_SELECT::get_next()
cur_rowid= prev_rowid;
prev_rowid= tmp;
- error= head->file->rnd_pos(quick->record, prev_rowid);
+ error= head->file->ha_rnd_pos(quick->record, prev_rowid);
} while (error == HA_ERR_RECORD_DELETED);
DBUG_RETURN(error);
}
@@ -8633,10 +8633,12 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length,
key_range start_key, end_key;
if (last_range)
{
- /* Read the next record in the same range with prefix after cur_prefix. */
+ /*
+ Read the next record in the same range with prefix after cur_prefix.
+ */
DBUG_ASSERT(cur_prefix != 0);
- result= file->index_read_map(record, cur_prefix, keypart_map,
- HA_READ_AFTER_KEY);
+ result= file->ha_index_read_map(record, cur_prefix, keypart_map,
+ HA_READ_AFTER_KEY);
if (result || (file->compare_key(file->end_range) <= 0))
DBUG_RETURN(result);
}
@@ -8692,8 +8694,8 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
if (last_range)
{
// Already read through key
- result= file->index_next_same(record, last_range->min_key,
- last_range->min_length);
+ result= file->ha_index_next_same(record, last_range->min_key,
+ last_range->min_length);
if (result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
}
@@ -8707,10 +8709,10 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
}
last_range= *(cur_range++);
- result= file->index_read_map(record, last_range->min_key,
- last_range->min_keypart_map,
- (ha_rkey_function)(last_range->flag ^
- GEOM_FLAG));
+ result= file->ha_index_read_map(record, last_range->min_key,
+ last_range->min_keypart_map,
+ (ha_rkey_function)(last_range->flag ^
+ GEOM_FLAG));
if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
last_range= 0; // Not found, to next range
@@ -8822,9 +8824,9 @@ int QUICK_SELECT_DESC::get_next()
{ // Already read through key
result = ((last_range->flag & EQ_RANGE &&
used_key_parts <= head->key_info[index].key_parts) ?
- file->index_next_same(record, last_range->min_key,
+ file->ha_index_next_same(record, last_range->min_key,
last_range->min_length) :
- file->index_prev(record));
+ file->ha_index_prev(record));
if (!result)
{
if (cmp_prev(*rev_it.ref()) == 0)
@@ -8840,7 +8842,7 @@ int QUICK_SELECT_DESC::get_next()
if (last_range->flag & NO_MAX_RANGE) // Read last record
{
int local_error;
- if ((local_error=file->index_last(record)))
+ if ((local_error= file->ha_index_last(record)))
DBUG_RETURN(local_error); // Empty table
if (cmp_prev(last_range) == 0)
DBUG_RETURN(0);
@@ -8852,9 +8854,9 @@ int QUICK_SELECT_DESC::get_next()
used_key_parts <= head->key_info[index].key_parts)
{
- result = file->index_read_map(record, last_range->max_key,
- last_range->max_keypart_map,
- HA_READ_KEY_EXACT);
+ result= file->ha_index_read_map(record, last_range->max_key,
+ last_range->max_keypart_map,
+ HA_READ_KEY_EXACT);
}
else
{
@@ -8862,11 +8864,11 @@ int QUICK_SELECT_DESC::get_next()
(last_range->flag & EQ_RANGE &&
used_key_parts > head->key_info[index].key_parts) ||
range_reads_after_key(last_range));
- result=file->index_read_map(record, last_range->max_key,
- last_range->max_keypart_map,
- ((last_range->flag & NEAR_MAX) ?
- HA_READ_BEFORE_KEY :
- HA_READ_PREFIX_LAST_OR_PREV));
+ result= file->ha_index_read_map(record, last_range->max_key,
+ last_range->max_keypart_map,
+ ((last_range->flag & NEAR_MAX) ?
+ HA_READ_BEFORE_KEY :
+ HA_READ_PREFIX_LAST_OR_PREV));
}
if (result)
{
@@ -10602,7 +10604,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void)
DBUG_RETURN(result);
if (quick_prefix_select && quick_prefix_select->reset())
DBUG_RETURN(1);
- result= file->index_last(record);
+ result= file->ha_index_last(record);
if (result == HA_ERR_END_OF_FILE)
DBUG_RETURN(0);
/* Save the prefix of the last group. */
@@ -10704,9 +10706,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
first sub-group with the extended prefix.
*/
if (!have_min && !have_max && key_infix_len > 0)
- result= file->index_read_map(record, group_prefix,
- make_prev_keypart_map(real_key_parts),
- HA_READ_KEY_EXACT);
+ result= file->ha_index_read_map(record, group_prefix,
+ make_prev_keypart_map(real_key_parts),
+ HA_READ_KEY_EXACT);
result= have_min ? min_res : have_max ? max_res : result;
} while ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
@@ -10768,9 +10770,10 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
/* Apply the constant equality conditions to the non-group select fields */
if (key_infix_len > 0)
{
- if ((result= file->index_read_map(record, group_prefix,
- make_prev_keypart_map(real_key_parts),
- HA_READ_KEY_EXACT)))
+ if ((result=
+ file->ha_index_read_map(record, group_prefix,
+ make_prev_keypart_map(real_key_parts),
+ HA_READ_KEY_EXACT)))
DBUG_RETURN(result);
}
@@ -10785,9 +10788,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
{
/* Find the first subsequent record without NULL in the MIN/MAX field. */
key_copy(tmp_record, record, index_info, 0);
- result= file->index_read_map(record, tmp_record,
- make_keypart_map(real_key_parts),
- HA_READ_AFTER_KEY);
+ result= file->ha_index_read_map(record, tmp_record,
+ make_keypart_map(real_key_parts),
+ HA_READ_AFTER_KEY);
/*
Check if the new record belongs to the current group by comparing its
prefix with the group's prefix. If it is from the next group, then the
@@ -10842,9 +10845,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max()
if (min_max_ranges.elements > 0)
result= next_max_in_range();
else
- result= file->index_read_map(record, group_prefix,
- make_prev_keypart_map(real_key_parts),
- HA_READ_PREFIX_LAST);
+ result= file->ha_index_read_map(record, group_prefix,
+ make_prev_keypart_map(real_key_parts),
+ HA_READ_PREFIX_LAST);
DBUG_RETURN(result);
}
@@ -10887,7 +10890,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
{
if (!seen_first_key)
{
- result= file->index_first(record);
+ result= file->ha_index_first(record);
if (result)
DBUG_RETURN(result);
seen_first_key= TRUE;
@@ -10895,9 +10898,9 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
else
{
/* Load the first key in this group into record. */
- result= file->index_read_map(record, group_prefix,
- make_prev_keypart_map(group_key_parts),
- HA_READ_AFTER_KEY);
+ result= file->ha_index_read_map(record, group_prefix,
+ make_prev_keypart_map(group_key_parts),
+ HA_READ_AFTER_KEY);
if (result)
DBUG_RETURN(result);
}
@@ -10974,7 +10977,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT;
}
- result= file->index_read_map(record, group_prefix, keypart_map, find_flag);
+ result= file->ha_index_read_map(record, group_prefix, keypart_map,
+ find_flag);
if (result)
{
if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
@@ -11113,7 +11117,8 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range()
HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV;
}
- result= file->index_read_map(record, group_prefix, keypart_map, find_flag);
+ result= file->ha_index_read_map(record, group_prefix, keypart_map,
+ find_flag);
if (result)
{
diff --git a/sql/opt_range.h b/sql/opt_range.h
index 6a0234e62fe..6f7ef7842e7 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -727,7 +727,7 @@ public:
~FT_SELECT() { file->ft_end(); }
int init() { return error=file->ft_init(); }
int reset() { return 0; }
- int get_next() { return error=file->ft_read(record); }
+ int get_next() { return error= file->ha_ft_read(record); }
int get_type() { return QS_TYPE_FULLTEXT; }
};
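The opt_range.cc and opt_range.h hunks above replace direct handler calls such as index_read_map() and rnd_pos() with ha_-prefixed wrappers, so that every row fetched through them can be accounted for in one place. The sketch below shows the general delegate-and-count idiom such wrappers typically follow; Engine, CountingHandler and rows_read are illustrative names only and do not reflect the real handler API.

// Sketch of the wrap-and-count idiom assumed to motivate the ha_* calls above
// (Engine, CountingHandler and rows_read are illustrative names only).
#include <cstdio>

struct Engine {                       // stands in for a storage engine cursor
  int index_next(char *buf) { (void) buf; return 0; /* 0 == row found */ }
};

class CountingHandler {
public:
  explicit CountingHandler(Engine *e) : rows_read(0), engine(e) {}

  // Public entry point: delegates to the engine and updates statistics,
  // so every caller that uses ha_index_next() is counted automatically.
  int ha_index_next(char *buf) {
    int err = engine->index_next(buf);
    if (err == 0)
      rows_read++;                    // only successful reads are counted
    return err;
  }

  unsigned long long rows_read;
private:
  Engine *engine;
};

int main() {
  Engine e;
  CountingHandler h(&e);
  char row[16];
  for (int i = 0; i < 3; i++)
    h.ha_index_next(row);
  std::printf("rows read: %llu\n", h.rows_read);
}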
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 87ef3af6e44..cefb507a61e 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -255,7 +255,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
error= table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
- error= table->file->index_first(table->record[0]);
+ error= table->file->ha_index_first(table->record[0]);
else
{
/*
@@ -277,10 +277,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
Closed interval: Either The MIN argument is non-nullable, or
we have a >= predicate for the MIN argument.
*/
- error= table->file->index_read_map(table->record[0],
- ref.key_buff,
- make_prev_keypart_map(ref.key_parts),
- HA_READ_KEY_OR_NEXT);
+ error= table->file->ha_index_read_map(table->record[0],
+ ref.key_buff,
+ make_prev_keypart_map(ref.key_parts),
+ HA_READ_KEY_OR_NEXT);
else
{
/*
@@ -289,10 +289,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
2) there is a > predicate on it, nullability is irrelevant.
We need to scan the next bigger record first.
*/
- error= table->file->index_read_map(table->record[0],
- ref.key_buff,
- make_prev_keypart_map(ref.key_parts),
- HA_READ_AFTER_KEY);
+ error= table->file->ha_index_read_map(table->record[0],
+ ref.key_buff,
+ make_prev_keypart_map(ref.key_parts),
+ HA_READ_AFTER_KEY);
/*
If the found record is outside the group formed by the search
prefix, or there is no such record at all, check if all
@@ -315,10 +315,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
key_cmp_if_same(table, ref.key_buff, ref.key, prefix_len)))
{
DBUG_ASSERT(item_field->field->real_maybe_null());
- error= table->file->index_read_map(table->record[0],
- ref.key_buff,
- make_prev_keypart_map(ref.key_parts),
- HA_READ_KEY_EXACT);
+ error= table->file->ha_index_read_map(table->record[0],
+ ref.key_buff,
+ make_prev_keypart_map(ref.key_parts),
+ HA_READ_KEY_EXACT);
}
}
}
@@ -403,13 +403,13 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds)
error= table->file->ha_index_init((uint) ref.key, 1);
if (!ref.key_length)
- error= table->file->index_last(table->record[0]);
+ error= table->file->ha_index_last(table->record[0]);
else
- error= table->file->index_read_map(table->record[0], key_buff,
- make_prev_keypart_map(ref.key_parts),
- range_fl & NEAR_MAX ?
- HA_READ_BEFORE_KEY :
- HA_READ_PREFIX_LAST_OR_PREV);
+ error= table->file->ha_index_read_map(table->record[0], key_buff,
+ make_prev_keypart_map(ref.key_parts),
+ range_fl & NEAR_MAX ?
+ HA_READ_BEFORE_KEY :
+ HA_READ_PREFIX_LAST_OR_PREV);
if (!error && reckey_in_range(1, &ref, item_field->field,
conds, range_fl, prefix_len))
error= HA_ERR_KEY_NOT_FOUND;
diff --git a/sql/procedure.h b/sql/procedure.h
index ceb586766b1..30a8a0efccb 100644
--- a/sql/procedure.h
+++ b/sql/procedure.h
@@ -42,7 +42,11 @@ public:
{
init_make_field(tmp_field,field_type());
}
- unsigned int size_of() { return sizeof(*this);}
+ unsigned int size_of() { return sizeof(*this);}
+ bool check_vcol_func_processor(uchar *int_arg)
+ {
+ return trace_unsupported_by_check_vcol_func_processor("proc");
+ }
};
class Item_proc_real :public Item_proc
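check_vcol_func_processor() above follows the Item "processor" idiom: a callback is applied to every node of an expression tree, and returning TRUE rejects the expression, which is how items unsuitable for computed columns are filtered out. A minimal sketch of that walk-with-processor pattern, using invented node types (Node, FuncNode, LiteralNode, reject_funcs), follows.

// Sketch of a processor-style tree walk that rejects unsupported nodes
// (Node, FuncNode, LiteralNode and reject_funcs are hypothetical names).
#include <cstdio>
#include <vector>

struct Node {
  std::vector<Node *> children;
  // Returns true as soon as any node (or child) is rejected by the processor.
  virtual bool walk(bool (*processor)(Node *, void *), void *arg) {
    if (processor(this, arg))
      return true;
    for (Node *c : children)
      if (c->walk(processor, arg))
        return true;
    return false;
  }
  virtual bool is_function() const { return false; }
  virtual ~Node() {}
};

struct FuncNode : Node {
  bool is_function() const override { return true; }
};
struct LiteralNode : Node {};

// Processor: disallow function calls, e.g. inside a computed-column definition.
static bool reject_funcs(Node *n, void *) { return n->is_function(); }

int main() {
  LiteralNode lit;
  FuncNode fn;
  fn.children.push_back(&lit);
  std::printf("expression allowed: %s\n",
              fn.walk(reject_funcs, nullptr) ? "no" : "yes");
}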
diff --git a/sql/records.cc b/sql/records.cc
index 2fc5a26a210..e2a1ea9b4af 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -327,6 +327,7 @@ static int rr_quick(READ_RECORD *info)
break;
}
}
+ update_virtual_fields(info->table);
return tmp;
}
@@ -346,7 +347,7 @@ static int rr_quick(READ_RECORD *info)
static int rr_index_first(READ_RECORD *info)
{
- int tmp= info->file->index_first(info->record);
+ int tmp= info->file->ha_index_first(info->record);
info->read_record= rr_index;
if (tmp)
tmp= rr_handle_error(info, tmp);
@@ -372,7 +373,7 @@ static int rr_index_first(READ_RECORD *info)
static int rr_index(READ_RECORD *info)
{
- int tmp= info->file->index_next(info->record);
+ int tmp= info->file->ha_index_next(info->record);
if (tmp)
tmp= rr_handle_error(info, tmp);
return tmp;
@@ -382,7 +383,7 @@ static int rr_index(READ_RECORD *info)
int rr_sequential(READ_RECORD *info)
{
int tmp;
- while ((tmp=info->file->rnd_next(info->record)))
+ while ((tmp= info->file->ha_rnd_next(info->record)))
{
/*
rnd_next can return RECORD_DELETED for MyISAM when one thread is
@@ -394,6 +395,8 @@ int rr_sequential(READ_RECORD *info)
break;
}
}
+ if (!tmp)
+ update_virtual_fields(info->table);
return tmp;
}
@@ -405,7 +408,7 @@ static int rr_from_tempfile(READ_RECORD *info)
{
if (my_b_read(info->io_cache,info->ref_pos,info->ref_length))
return -1; /* End of file */
- if (!(tmp=info->file->rnd_pos(info->record,info->ref_pos)))
+ if (!(tmp= info->file->ha_rnd_pos(info->record,info->ref_pos)))
break;
/* The following is extremely unlikely to happen */
if (tmp == HA_ERR_RECORD_DELETED ||
@@ -456,7 +459,7 @@ static int rr_from_pointers(READ_RECORD *info)
cache_pos= info->cache_pos;
info->cache_pos+= info->ref_length;
- if (!(tmp=info->file->rnd_pos(info->record,cache_pos)))
+ if (!(tmp= info->file->ha_rnd_pos(info->record,cache_pos)))
break;
/* The following is extremely unlikely to happen */
@@ -589,7 +592,7 @@ static int rr_from_cache(READ_RECORD *info)
record=uint3korr(position);
position+=3;
record_pos=info->cache+record*info->reclength;
- if ((error=(int16) info->file->rnd_pos(record_pos,info->ref_pos)))
+ if ((error=(int16) info->file->ha_rnd_pos(record_pos,info->ref_pos)))
{
record_pos[info->error_offset]=1;
shortstore(record_pos,error);
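rr_sequential() above keeps calling the handler while it reports a concurrently deleted record, treating that case as "fetch the next row" rather than an error. The standalone sketch below shows the same skip-and-continue loop with invented stand-ins (read_next, ERR_RECORD_DELETED, ERR_END_OF_FILE).

// Sketch of the skip-deleted-rows loop used by the sequential read path
// (read_next, ERR_RECORD_DELETED and ERR_END_OF_FILE are invented stand-ins).
#include <cstdio>

enum { OK = 0, ERR_RECORD_DELETED = 1, ERR_END_OF_FILE = 2 };

// Fake source: rows 0..4, where row 2 was deleted by a concurrent thread.
static int read_next(int *cursor, int *out) {
  if (*cursor >= 5) return ERR_END_OF_FILE;
  int row = (*cursor)++;
  if (row == 2) return ERR_RECORD_DELETED;
  *out = row;
  return OK;
}

int main() {
  int cursor = 0, row, err;
  // Keep reading while the only problem is a concurrently deleted record.
  while ((err = read_next(&cursor, &row)) != ERR_END_OF_FILE) {
    if (err == ERR_RECORD_DELETED)
      continue;                      // not a real error: fetch the next row
    if (err != OK)
      break;                         // any other error ends the scan
    std::printf("got row %d\n", row);
  }
}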
diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc
index ee3fc358b60..ffae94f733a 100644
--- a/sql/rpl_filter.cc
+++ b/sql/rpl_filter.cc
@@ -45,6 +45,7 @@ Rpl_filter::~Rpl_filter()
}
+#ifndef MYSQL_CLIENT
/*
Returns true if table should be logged/replicated
@@ -129,6 +130,7 @@ Rpl_filter::tables_ok(const char* db, TABLE_LIST* tables)
!do_table_inited && !wild_do_table_inited);
}
+#endif
/*
Checks whether a db matches some do_db and ignore_db rules
@@ -520,6 +522,13 @@ Rpl_filter::get_wild_ignore_table(String* str)
}
+bool
+Rpl_filter::rewrite_db_is_empty()
+{
+ return rewrite_db.is_empty();
+}
+
+
const char*
Rpl_filter::get_rewrite_db(const char* db, size_t *new_len)
{
diff --git a/sql/rpl_filter.h b/sql/rpl_filter.h
index ff7e4081bb1..88951600e56 100644
--- a/sql/rpl_filter.h
+++ b/sql/rpl_filter.h
@@ -42,7 +42,9 @@ public:
/* Checks - returns true if ok to replicate/log */
- bool tables_ok(const char* db, TABLE_LIST* tables);
+#ifndef MYSQL_CLIENT
+ bool tables_ok(const char* db, TABLE_LIST *tables);
+#endif
bool db_ok(const char* db);
bool db_ok_with_wild_table(const char *db);
@@ -69,6 +71,7 @@ public:
void get_wild_do_table(String* str);
void get_wild_ignore_table(String* str);
+ bool rewrite_db_is_empty();
const char* get_rewrite_db(const char* db, size_t *new_len);
I_List<i_string>* get_do_db();
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 166f280be4e..3a63e46399a 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -511,6 +511,9 @@ static sys_var_const sys_protocol_version(&vars, "protocol_version",
static sys_var_thd_ulong sys_read_buff_size(&vars, "read_buffer_size",
&SV::read_buff_size);
static sys_var_opt_readonly sys_readonly(&vars, "read_only", &opt_readonly);
+static sys_var_bool_ptr sys_userstat(&vars, "userstat",
+ &opt_userstat_running);
+
static sys_var_thd_ulong sys_read_rnd_buff_size(&vars, "read_rnd_buffer_size",
&SV::read_rnd_buff_size);
static sys_var_thd_ulong sys_div_precincrement(&vars, "div_precision_increment",
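The new "userstat" system variable is a server-wide boolean that each connection samples when a statement starts (THD::init() copies opt_userstat_running into userstat_running further down), so toggling it mid-statement cannot skew the accounting of that statement. A small sketch of the snapshot-a-global-flag idea, with illustrative names (g_userstat_enabled, Session), follows.

// Sketch of sampling a global boolean switch once per unit of work
// (g_userstat_enabled and Session are illustrative, not server symbols).
#include <atomic>
#include <cstdio>

static std::atomic<bool> g_userstat_enabled(false);   // like the server option

struct Session {
  bool userstat_running;             // snapshot taken when the work starts
  void begin_work() { userstat_running = g_userstat_enabled.load(); }
  void end_work() {
    if (userstat_running)            // use the snapshot, not the live flag
      std::printf("collecting statistics for this unit of work\n");
  }
};

int main() {
  Session s;
  g_userstat_enabled.store(true);    // e.g. SET GLOBAL userstat=1
  s.begin_work();
  g_userstat_enabled.store(false);   // toggled mid-work: snapshot still wins
  s.end_work();
}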
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 20f51a952bc..a8839425aa7 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -6207,6 +6207,33 @@ ER_TOO_MANY_CONCURRENT_TRXS
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED
eng "Non-ASCII separator arguments are not fully supported"
+ER_VCOL_BASED_ON_VCOL
+ eng "A computed column cannot be based on a computed column"
+
+ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED
+ eng "Function or expression is not allowed for column '%s'."
+
+ER_DATA_CONVERSION_ERROR_FOR_VIRTUAL_COLUMN
+ eng "Generated value for computed column '%s' cannot be converted to type '%s'."
+
+ER_PRIMARY_KEY_BASED_ON_VIRTUAL_COLUMN
+ eng "Primary key cannot be defined upon a computed column."
+
+ER_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN
+ eng "Key/Index cannot be defined on a non-stored computed column."
+
+ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN
+ eng "Cannot define foreign key with %s clause on a computed column."
+
+ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN
+ eng "The value specified for computed column '%s' in table '%s' ignored."
+
+ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN
+ eng "'%s' is not yet supported for computed columns."
+
+ER_CONST_EXPR_IN_VCOL
+ eng "Constant expression in computed column function is not allowed."
+
ER_DEBUG_SYNC_TIMEOUT
eng "debug sync point wait timed out"
ger "Debug Sync Point Wartezeit überschritten"
diff --git a/sql/sp.cc b/sql/sp.cc
index fd420732628..d4626dde38f 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -344,8 +344,9 @@ db_find_routine_aux(THD *thd, int type, sp_name *name, TABLE *table)
key_copy(key, table->record[0], table->key_info,
table->key_info->key_length);
- if (table->file->index_read_idx_map(table->record[0], 0, key, HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_idx_map(table->record[0], 0, key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
DBUG_RETURN(SP_KEY_NOT_FOUND);
DBUG_RETURN(SP_OK);
@@ -1101,9 +1102,9 @@ sp_drop_db_routines(THD *thd, char *db)
ret= SP_OK;
table->file->ha_index_init(0, 1);
- if (! table->file->index_read_map(table->record[0],
- (uchar *)table->field[MYSQL_PROC_FIELD_DB]->ptr,
- (key_part_map)1, HA_READ_KEY_EXACT))
+ if (!table->file->ha_index_read_map(table->record[0],
+ (uchar *) table->field[MYSQL_PROC_FIELD_DB]->ptr,
+ (key_part_map)1, HA_READ_KEY_EXACT))
{
int nxtres;
bool deleted= FALSE;
@@ -1118,9 +1119,11 @@ sp_drop_db_routines(THD *thd, char *db)
nxtres= 0;
break;
}
- } while (! (nxtres= table->file->index_next_same(table->record[0],
- (uchar *)table->field[MYSQL_PROC_FIELD_DB]->ptr,
- key_len)));
+ } while (!(nxtres= table->file->
+ ha_index_next_same(table->record[0],
+ (uchar *)table->field[MYSQL_PROC_FIELD_DB]->
+ ptr,
+ key_len)));
if (nxtres != HA_ERR_END_OF_FILE)
ret= SP_KEY_NOT_FOUND;
if (deleted)
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index e285b20928b..580ed900d8a 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -819,6 +819,8 @@ sp_head::create_result_field(uint field_max_length, const char *field_name,
m_return_field_def.interval,
field_name ? field_name : (const char *) m_name.str);
+ field->vcol_info= m_return_field_def.vcol_info;
+ field->stored_in_db= m_return_field_def.stored_in_db;
if (field)
field->init(table);
@@ -2201,7 +2203,8 @@ sp_head::fill_field_definition(THD *thd, LEX *lex,
&lex->interval_list,
lex->charset ? lex->charset :
thd->variables.collation_database,
- lex->uint_geom_type))
+ lex->uint_geom_type,
+ lex->vcol_info))
return TRUE;
if (field_def->interval_list.elements)
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 9d4eb362fe7..c9f107cd0bb 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -1833,9 +1833,9 @@ static bool update_user_table(THD *thd, TABLE *table,
key_copy((uchar *) user_key, table->record[0], table->key_info,
table->key_info->key_length);
- if (table->file->index_read_idx_map(table->record[0], 0,
- (uchar *) user_key, HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_idx_map(table->record[0], 0,
+ (uchar *) user_key, HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{
my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH),
MYF(0)); /* purecov: deadcode */
@@ -1926,9 +1926,9 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- if (table->file->index_read_idx_map(table->record[0], 0, user_key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_idx_map(table->record[0], 0, user_key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{
/* what == 'N' means revoke */
if (what == 'N')
@@ -2150,9 +2150,9 @@ static int replace_db_table(TABLE *table, const char *db,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- if (table->file->index_read_idx_map(table->record[0],0, user_key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_idx_map(table->record[0],0, user_key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{
if (what == 'N')
{ // no row, no revoke
@@ -2384,8 +2384,9 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
col_privs->field[4]->store("",0, &my_charset_latin1);
col_privs->file->ha_index_init(0, 1);
- if (col_privs->file->index_read_map(col_privs->record[0], (uchar*) key,
- (key_part_map)15, HA_READ_KEY_EXACT))
+ if (col_privs->file->ha_index_read_map(col_privs->record[0], (uchar*) key,
+ (key_part_map)15,
+ HA_READ_KEY_EXACT))
{
cols = 0; /* purecov: deadcode */
col_privs->file->ha_index_end();
@@ -2406,7 +2407,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs)
return; /* purecov: deadcode */
}
my_hash_insert(&hash_columns, (uchar *) mem_check);
- } while (!col_privs->file->index_next(col_privs->record[0]) &&
+ } while (!col_privs->file->ha_index_next(col_privs->record[0]) &&
!key_cmp_if_same(col_privs,key,0,key_prefix_len));
col_privs->file->ha_index_end();
}
@@ -2547,8 +2548,8 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- if (table->file->index_read_map(table->record[0], user_key, HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_map(table->record[0], user_key,
+ HA_WHOLE_KEY, HA_READ_KEY_EXACT))
{
if (revoke_grant)
{
@@ -2625,9 +2626,9 @@ static int replace_column_table(GRANT_TABLE *g_t,
key_copy(user_key, table->record[0], table->key_info,
key_prefix_length);
- if (table->file->index_read_map(table->record[0], user_key,
- (key_part_map)15,
- HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_map(table->record[0], user_key,
+ (key_part_map)15,
+ HA_READ_KEY_EXACT))
goto end;
/* Scan through all rows with the same host,db,user and table */
@@ -2678,7 +2679,7 @@ static int replace_column_table(GRANT_TABLE *g_t,
hash_delete(&g_t->hash_columns,(uchar*) grant_column);
}
}
- } while (!table->file->index_next(table->record[0]) &&
+ } while (!table->file->ha_index_next(table->record[0]) &&
!key_cmp_if_same(table, key, 0, key_prefix_length));
}
@@ -2728,9 +2729,9 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table,
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
- if (table->file->index_read_idx_map(table->record[0], 0, user_key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_idx_map(table->record[0], 0, user_key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{
/*
The following should never happen as we first check the in memory
@@ -2855,10 +2856,10 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
TRUE);
store_record(table,record[1]); // store at pos 1
- if (table->file->index_read_idx_map(table->record[0], 0,
- (uchar*) table->field[0]->ptr,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_idx_map(table->record[0], 0,
+ (uchar*) table->field[0]->ptr,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{
/*
The following should never happen as we first check the in memory
@@ -3563,7 +3564,7 @@ static my_bool grant_load_procs_priv(TABLE *p_table)
p_table->file->ha_index_init(0, 1);
p_table->use_all_columns();
- if (!p_table->file->index_first(p_table->record[0]))
+ if (!p_table->file->ha_index_first(p_table->record[0]))
{
memex_ptr= &memex;
my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr);
@@ -3615,7 +3616,7 @@ static my_bool grant_load_procs_priv(TABLE *p_table)
goto end_unlock;
}
}
- while (!p_table->file->index_next(p_table->record[0]));
+ while (!p_table->file->ha_index_next(p_table->record[0]));
}
/* Return ok */
return_val= 0;
@@ -3665,7 +3666,7 @@ static my_bool grant_load(THD *thd, TABLE_LIST *tables)
t_table->use_all_columns();
c_table->use_all_columns();
- if (!t_table->file->index_first(t_table->record[0]))
+ if (!t_table->file->ha_index_first(t_table->record[0]))
{
memex_ptr= &memex;
my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr);
@@ -3700,7 +3701,7 @@ static my_bool grant_load(THD *thd, TABLE_LIST *tables)
goto end_unlock;
}
}
- while (!t_table->file->index_next(t_table->record[0]));
+ while (!t_table->file->ha_index_next(t_table->record[0]));
}
return_val=0; // Return ok
@@ -3972,6 +3973,8 @@ err:
{
char command[128];
get_privilege_desc(command, sizeof(command), want_access);
+ status_var_increment(thd->status_var.access_denied_errors);
+
my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0),
command,
sctx->priv_user,
@@ -5217,9 +5220,9 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
table->key_info->key_part[1].store_length);
key_copy(user_key, table->record[0], table->key_info, key_prefix_length);
- if ((error= table->file->index_read_idx_map(table->record[0], 0,
- user_key, (key_part_map)3,
- HA_READ_KEY_EXACT)))
+ if ((error= table->file->ha_index_read_idx_map(table->record[0], 0,
+ user_key, (key_part_map)3,
+ HA_READ_KEY_EXACT)))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
{
@@ -5254,7 +5257,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop,
DBUG_PRINT("info",("scan table: '%s' search: '%s'@'%s'",
table->s->table_name.str, user_str, host_str));
#endif
- while ((error= table->file->rnd_next(table->record[0])) !=
+ while ((error= table->file->ha_rnd_next(table->record[0])) !=
HA_ERR_END_OF_FILE)
{
if (error)
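The sql_acl.cc hunks above add status_var_increment(thd->status_var.access_denied_errors), i.e. the denial is counted in the thread's own status block and only later folded into global and per-user totals. A minimal sketch of that accumulate-per-thread, fold-on-disconnect pattern follows; ThreadCounters, g_access_denied and g_mutex are invented names.

// Sketch of per-thread counting folded into a global total at disconnect
// (ThreadCounters, g_access_denied and g_mutex are illustrative names).
#include <cstdio>
#include <mutex>

static unsigned long g_access_denied = 0;   // global total
static std::mutex g_mutex;                  // protects the global total

struct ThreadCounters {
  unsigned long access_denied = 0;          // cheap, lock-free per-thread count

  void on_access_denied() { access_denied++; }

  // On disconnect, add the thread's counters to the global ones under a lock.
  void fold_into_global() {
    std::lock_guard<std::mutex> guard(g_mutex);
    g_access_denied += access_denied;
    access_denied = 0;
  }
};

int main() {
  ThreadCounters t;
  t.on_access_denied();
  t.fold_into_global();
  std::printf("global access denied: %lu\n", g_access_denied);
}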
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index a0616fa6a4a..580a6f7246f 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1376,6 +1376,12 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
DBUG_PRINT("tcache", ("table: '%s'.'%s' 0x%lx", table->s->db.str,
table->s->table_name.str, (long) table));
+ if (table->file)
+ {
+ table->file->update_global_table_stats();
+ table->file->update_global_index_stats();
+ }
+
*table_ptr=table->next;
/*
When closing a MERGE parent or child table, detach the children first.
@@ -1909,6 +1915,13 @@ void close_temporary(TABLE *table, bool free_share, bool delete_table)
DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'",
table->s->db.str, table->s->table_name.str));
+ /* in_use is not set for replication temporary tables during shutdown */
+ if (table->in_use)
+ {
+ table->file->update_global_table_stats();
+ table->file->update_global_index_stats();
+ }
+
free_io_cache(table);
closefrm(table, 0);
if (delete_table)
@@ -5616,6 +5629,9 @@ static void update_field_dependencies(THD *thd, Field *field, TABLE *table)
table->covering_keys.intersect(field->part_of_key);
table->merge_keys.merge(field->part_of_key);
+ if (field->vcol_info)
+ table->mark_virtual_col(field);
+
if (thd->mark_used_columns == MARK_COLUMNS_READ)
current_bitmap= table->read_set;
else
@@ -7911,6 +7927,12 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
{
/* Mark fields as used to allow the storage engine to optimize access */
bitmap_set_bit(field->table->read_set, field->field_index);
+ /*
+ Mark virtual fields for write, and the fields that the virtual fields
+ depend on for read.
+ */
+ if (field->vcol_info)
+ field->table->mark_virtual_col(field);
if (table)
{
table->covering_keys.intersect(field->part_of_key);
@@ -8122,7 +8144,10 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values,
Item *value, *fld;
Item_field *field;
TABLE *table= 0;
+ List<TABLE> tbl_list;
+ bool abort_on_warning_saved= thd->abort_on_warning;
DBUG_ENTER("fill_record");
+ tbl_list.empty();
/*
Reset the table->auto_increment_field_not_null as it is valid for
@@ -8156,14 +8181,54 @@ fill_record(THD * thd, List<Item> &fields, List<Item> &values,
table= rfield->table;
if (rfield == table->next_number_field)
table->auto_increment_field_not_null= TRUE;
+ if (rfield->vcol_info &&
+ value->type() != Item::DEFAULT_VALUE_ITEM &&
+ value->type() != Item::NULL_ITEM &&
+ table->s->table_category != TABLE_CATEGORY_TEMPORARY)
+ {
+ thd->abort_on_warning= FALSE;
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN,
+ ER(ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN),
+ rfield->field_name, table->s->table_name.str);
+ thd->abort_on_warning= abort_on_warning_saved;
+ }
if ((value->save_in_field(rfield, 0) < 0) && !ignore_errors)
{
my_message(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), MYF(0));
goto err;
}
+ tbl_list.push_back(table);
}
+ /* Update virtual fields */
+ thd->abort_on_warning= FALSE;
+ if (tbl_list.head())
+ {
+ List_iterator_fast<TABLE> it(tbl_list);
+ TABLE *prev_table= 0;
+ while ((table= it++))
+ {
+ /*
+ Do a simple optimization to avoid unnecessarily re-generating
+ values for virtual fields
+ */
+ if (table != prev_table)
+ {
+ prev_table= table;
+ if (table->vfield)
+ {
+ if (update_virtual_fields(table, TRUE))
+ {
+ goto err;
+ }
+ }
+ }
+ }
+ }
+ thd->abort_on_warning= abort_on_warning_saved;
DBUG_RETURN(thd->is_error());
err:
+ thd->abort_on_warning= abort_on_warning_saved;
if (table)
table->auto_increment_field_not_null= FALSE;
DBUG_RETURN(TRUE);
@@ -8199,9 +8264,31 @@ fill_record_n_invoke_before_triggers(THD *thd, List<Item> &fields,
Table_triggers_list *triggers,
enum trg_event_type event)
{
- return (fill_record(thd, fields, values, ignore_errors) ||
- (triggers && triggers->process_triggers(thd, event,
- TRG_ACTION_BEFORE, TRUE)));
+ bool result;
+ result= (fill_record(thd, fields, values, ignore_errors) ||
+ (triggers && triggers->process_triggers(thd, event,
+ TRG_ACTION_BEFORE, TRUE)));
+ /*
+ Re-calculate virtual fields to cater for cases when base columns are
+ updated by the triggers.
+ */
+ if (!result && triggers)
+ {
+ TABLE *table= 0;
+ List_iterator_fast<Item> f(fields);
+ Item *fld;
+ Item_field *item_field;
+ if (fields.elements)
+ {
+ fld= (Item_field*)f++;
+ item_field= fld->filed_for_view_update();
+ if (item_field && item_field->field &&
+ (table= item_field->field->table) &&
+ table->vfield)
+ result= update_virtual_fields(table, TRUE);
+ }
+ }
+ return result;
}
@@ -8229,11 +8316,14 @@ bool
fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors)
{
List_iterator_fast<Item> v(values);
+ List<TABLE> tbl_list;
Item *value;
TABLE *table= 0;
+ bool abort_on_warning_saved= thd->abort_on_warning;
DBUG_ENTER("fill_record");
Field *field;
+ tbl_list.empty();
/*
Reset the table->auto_increment_field_not_null as it is valid for
only one row.
@@ -8253,12 +8343,52 @@ fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors)
table= field->table;
if (field == table->next_number_field)
table->auto_increment_field_not_null= TRUE;
+ if (field->vcol_info &&
+ value->type() != Item::DEFAULT_VALUE_ITEM &&
+ value->type() != Item::NULL_ITEM &&
+ table->s->table_category != TABLE_CATEGORY_TEMPORARY)
+ {
+ thd->abort_on_warning= FALSE;
+ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN,
+ ER(ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN),
+ field->field_name, table->s->table_name.str);
+ thd->abort_on_warning= abort_on_warning_saved;
+ }
if (value->save_in_field(field, 0) < 0)
goto err;
+ tbl_list.push_back(table);
}
+ /* Update virtual fields */
+ thd->abort_on_warning= FALSE;
+ if (tbl_list.head())
+ {
+ List_iterator_fast<TABLE> t(tbl_list);
+ TABLE *prev_table= 0;
+ while ((table= t++))
+ {
+ /*
+ Do a simple optimization to avoid unnecessarily re-generating
+ values for virtual fields
+ */
+ if (table != prev_table)
+ {
+ prev_table= table;
+ if (table->vfield)
+ {
+ if (update_virtual_fields(table, TRUE))
+ {
+ goto err;
+ }
+ }
+ }
+ }
+ }
+ thd->abort_on_warning= abort_on_warning_saved;
DBUG_RETURN(thd->is_error());
err:
+ thd->abort_on_warning= abort_on_warning_saved;
if (table)
table->auto_increment_field_not_null= FALSE;
DBUG_RETURN(TRUE);
@@ -8294,9 +8424,22 @@ fill_record_n_invoke_before_triggers(THD *thd, Field **ptr,
Table_triggers_list *triggers,
enum trg_event_type event)
{
- return (fill_record(thd, ptr, values, ignore_errors) ||
- (triggers && triggers->process_triggers(thd, event,
- TRG_ACTION_BEFORE, TRUE)));
+ bool result;
+ result= (fill_record(thd, ptr, values, ignore_errors) ||
+ (triggers && triggers->process_triggers(thd, event,
+ TRG_ACTION_BEFORE, TRUE)));
+ /*
+ Re-calculate virtual fields to cater for cases when base columns are
+ updated by the triggers.
+ */
+ if (!result && triggers && *ptr)
+ {
+ TABLE *table= (*ptr)->table;
+ if (table->vfield)
+ result= update_virtual_fields(table, TRUE);
+ }
+ return result;
+
}
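Both fill_record() variants above push every written-to table onto a list and then recompute virtual fields only when the table changes from one assigned field to the next, so a row that assigns many columns of the same table triggers a single recomputation. The sketch below shows that consecutive-duplicate skip with illustrative stand-ins (Table, recompute_derived).

// Sketch of the "recompute derived values once per table" loop in fill_record
// (Table and recompute_derived are illustrative stand-ins).
#include <cstdio>
#include <vector>

struct Table {
  const char *name;
  bool has_derived;                  // corresponds to table->vfield != NULL
};

static void recompute_derived(Table *t) {
  std::printf("recomputing derived columns of %s\n", t->name);
}

int main() {
  Table t1 = {"t1", true}, t2 = {"t2", false};
  // One entry per assigned field, so the same table appears repeatedly.
  std::vector<Table *> touched = {&t1, &t1, &t1, &t2, &t2};

  Table *prev = nullptr;
  for (Table *t : touched) {
    if (t == prev)
      continue;                      // simple optimization: skip repeats
    prev = t;
    if (t->has_derived)
      recompute_derived(t);          // done once per run of identical tables
  }
}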
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 0e837d382b0..83108f58c68 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -195,6 +195,59 @@ bool foreign_key_prefix(Key *a, Key *b)
#endif
}
+/*
+ @brief
+ Check if the foreign key options are compatible with the specification
+ of the columns on which the key is created
+
+ @retval
+ FALSE The foreign key options are compatible with key columns
+ @retval
+ TRUE Otherwise
+*/
+bool Foreign_key::validate(List<Create_field> &table_fields)
+{
+ Create_field *sql_field;
+ Key_part_spec *column;
+ List_iterator<Key_part_spec> cols(columns);
+ List_iterator<Create_field> it(table_fields);
+ DBUG_ENTER("Foreign_key::validate");
+ while ((column= cols++))
+ {
+ it.rewind();
+ while ((sql_field= it++) &&
+ my_strcasecmp(system_charset_info,
+ column->field_name,
+ sql_field->field_name)) {}
+ if (!sql_field)
+ {
+ my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), column->field_name);
+ DBUG_RETURN(TRUE);
+ }
+ if (type == Key::FOREIGN_KEY && sql_field->vcol_info)
+ {
+ if (delete_opt == FK_OPTION_SET_NULL)
+ {
+ my_error(ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN, MYF(0),
+ "ON DELETE SET NULL");
+ DBUG_RETURN(TRUE);
+ }
+ if (update_opt == FK_OPTION_SET_NULL)
+ {
+ my_error(ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN, MYF(0),
+ "ON UPDATE SET NULL");
+ DBUG_RETURN(TRUE);
+ }
+ if (update_opt == FK_OPTION_CASCADE)
+ {
+ my_error(ER_WRONG_FK_OPTION_FOR_VIRTUAL_COLUMN, MYF(0),
+ "ON UPDATE CASCADE");
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
/****************************************************************************
** Thread specific functions
@@ -644,6 +697,7 @@ THD::THD()
mysys_var=0;
binlog_evt_union.do_union= FALSE;
enable_slow_log= 0;
+
#ifndef DBUG_OFF
dbug_sentry=THD_SENTRY_MAGIC;
#endif
@@ -842,14 +896,69 @@ void THD::init(void)
update_charset();
reset_current_stmt_binlog_row_based();
bzero((char *) &status_var, sizeof(status_var));
+ bzero((char *) &org_status_var, sizeof(org_status_var));
sql_log_bin_toplevel= options & OPTION_BIN_LOG;
-
+ select_commands= update_commands= other_commands= 0;
+ /* Set to handle counting of aborted connections */
+ userstat_running= opt_userstat_running;
+ last_global_update_time= current_connect_time= time(NULL);
#if defined(ENABLED_DEBUG_SYNC)
/* Initialize the Debug Sync Facility. See debug_sync.cc. */
debug_sync_init_thread(this);
#endif /* defined(ENABLED_DEBUG_SYNC) */
}
+
+/* Updates some status variables to be used by update_global_user_stats */
+
+void THD::update_stats(void)
+{
+ /* sql_command == SQLCOM_END in case of parse errors or quit */
+ if (lex->sql_command != SQLCOM_END)
+ {
+ /* The replication thread has the COM_CONNECT command */
+ DBUG_ASSERT(command == COM_QUERY || command == COM_CONNECT);
+
+ /* A SQL query. */
+ if (lex->sql_command == SQLCOM_SELECT)
+ select_commands++;
+ else if (sql_command_flags[lex->sql_command] & CF_STATUS_COMMAND)
+ {
+ /* Ignore 'SHOW ' commands */
+ }
+ else if (is_update_query(lex->sql_command))
+ update_commands++;
+ else
+ other_commands++;
+ }
+}
+
+
+void THD::update_all_stats()
+{
+ time_t save_time;
+ ulonglong end_cpu_time, end_utime;
+ double busy_time, cpu_time;
+
+ /* This is set at start of query if opt_userstat_running was set */
+ if (!userstat_running)
+ return;
+
+ end_cpu_time= my_getcputime();
+ end_utime= my_micro_time_and_time(&save_time);
+ busy_time= (end_utime - start_utime) / 1000000.0;
+ cpu_time= (end_cpu_time - start_cpu_time) / 10000000.0;
+ /* In case there are bad values, 2629743 is the #seconds in a month. */
+ if (cpu_time > 2629743.0)
+ cpu_time= 0;
+ status_var_add(status_var.cpu_time, cpu_time);
+ status_var_add(status_var.busy_time, busy_time);
+
+ /* Updates THD stats and the global user stats. */
+ update_stats();
+ update_global_user_stats(this, TRUE, save_time);
+}
+
/*
Init THD for query processing.
@@ -1020,9 +1129,8 @@ THD::~THD()
from_var from this array
NOTES
- This function assumes that all variables are long/ulong.
- If this assumption will change, then we have to explictely add
- the other variables after the while loop
+ This function assumes that all variables at the start are long/ulong and
+ that other types are handled explicitly
*/
void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
@@ -1034,6 +1142,13 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
while (to != end)
*(to++)+= *(from++);
+
+ /* Handle the non-ulong variables. See the end of system_status_var */
+ to_var->bytes_received= from_var->bytes_received;
+ to_var->bytes_sent+= from_var->bytes_sent;
+ to_var->binlog_bytes_written= from_var->binlog_bytes_written;
+ to_var->cpu_time+= from_var->cpu_time;
+ to_var->busy_time+= from_var->busy_time;
}
/*
@@ -1046,7 +1161,8 @@ void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var)
dec_var minus this array
NOTE
- This function assumes that all variables are long/ulong.
+ This function assumes that all variables at the start are long/ulong and
+ that other types are handled explicitly
*/
void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
@@ -1059,6 +1175,14 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
while (to != end)
*(to++)+= *(from++) - *(dec++);
+
+ to_var->bytes_received= (from_var->bytes_received -
+ dec_var->bytes_received);
+ to_var->bytes_sent+= from_var->bytes_sent - dec_var->bytes_sent;
+ to_var->binlog_bytes_written= (from_var->binlog_bytes_written -
+ dec_var->binlog_bytes_written);
+ to_var->cpu_time+= from_var->cpu_time - dec_var->cpu_time;
+ to_var->busy_time+= from_var->busy_time - dec_var->busy_time;
}
#define SECONDS_TO_WAIT_FOR_KILL 2
@@ -2832,7 +2956,8 @@ void thd_increment_bytes_sent(ulong length)
{
THD *thd=current_thd;
if (likely(thd != 0))
- { /* current_thd==0 when close_connection() calls net_send_error() */
+ {
+ /* current_thd == 0 when close_connection() calls net_send_error() */
thd->status_var.bytes_sent+= length;
}
}
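THD::update_all_stats() above derives busy_time from a microsecond wall-clock delta and cpu_time from a CPU-time clock, clamping absurd CPU values before adding both to the thread's status block. The standalone sketch below reproduces that idea with standard C++ facilities only; the server itself uses my_getcputime() and my_micro_time_and_time(), not std::chrono or std::clock().

// Sketch of per-statement busy/CPU time accounting in standard C++ only
// (the server uses my_getcputime()/my_micro_time_and_time(); this is not it).
#include <chrono>
#include <cstdio>
#include <ctime>

int main() {
  auto wall_start = std::chrono::steady_clock::now();
  std::clock_t cpu_start = std::clock();

  // ... the "statement" being measured ...
  volatile unsigned long x = 0;
  for (unsigned long i = 0; i < 10000000UL; i++) x += i;

  double busy_time = std::chrono::duration<double>(
      std::chrono::steady_clock::now() - wall_start).count();
  double cpu_time = double(std::clock() - cpu_start) / CLOCKS_PER_SEC;

  // Guard against bogus clock values the same way the patch clamps cpu_time.
  if (cpu_time < 0 || cpu_time > 2629743.0)   // more than a month is bogus
    cpu_time = 0;

  std::printf("busy_time=%.6fs cpu_time=%.6fs\n", busy_time, cpu_time);
}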
diff --git a/sql/sql_class.h b/sql/sql_class.h
index dc19f3d24ba..2508a0bb0a9 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -251,6 +251,8 @@ public:
*/
virtual Key *clone(MEM_ROOT *mem_root) const
{ return new (mem_root) Foreign_key(*this, mem_root); }
+ /* Used to validate foreign key options */
+ bool validate(List<Create_field> &table_fields);
};
typedef struct st_mysql_lock
@@ -417,8 +419,6 @@ struct system_variables
typedef struct system_status_var
{
- ulonglong bytes_received;
- ulonglong bytes_sent;
ulong com_other;
ulong com_stat[(uint) SQLCOM_END];
ulong created_tmp_disk_tables;
@@ -457,6 +457,8 @@ typedef struct system_status_var
ulong select_range_count;
ulong select_range_check_count;
ulong select_scan_count;
+ ulong rows_read;
+ ulong rows_sent;
ulong long_query_count;
ulong filesort_merge_passes;
ulong filesort_range_count;
@@ -474,6 +476,9 @@ typedef struct system_status_var
Number of statements sent from the client
*/
ulong questions;
+ ulong empty_queries;
+ ulong access_denied_errors; /* Can only be 0 or 1 */
+ ulong lost_connections;
/*
IMPORTANT!
SEE last_system_status_var DEFINITION BELOW.
@@ -482,12 +487,16 @@ typedef struct system_status_var
Status variables which it does not make sense to add to
global status variable counter
*/
+ ulonglong bytes_received;
+ ulonglong bytes_sent;
+ ulonglong binlog_bytes_written;
double last_query_cost;
+ double cpu_time, busy_time;
} STATUS_VAR;
/*
This is used for 'SHOW STATUS'. It must be updated to the last ulong
- variable in system_status_var which is makes sens to add to the global
+ variable in system_status_var which it makes sense to add to the global
counter
*/
@@ -1329,6 +1338,7 @@ public:
struct my_rnd_struct rand; // used for authentication
struct system_variables variables; // Changeable local variables
struct system_status_var status_var; // Per thread statistic vars
+ struct system_status_var org_status_var; // For user statistics
struct system_status_var *initial_status_var; /* used by show status */
THR_LOCK_INFO lock_info; // Locking info of this thread
THR_LOCK_OWNER main_lock_id; // To use for conventional queries
@@ -1429,6 +1439,8 @@ public:
uint in_sub_stmt;
/* TRUE when the current top has SQL_LOG_BIN ON */
bool sql_log_bin_toplevel;
+ /* True when opt_userstat_running is set at start of query */
+ bool userstat_running;
/* container for handler's private per-connection data */
Ha_data ha_data[MAX_HA];
@@ -1872,6 +1884,21 @@ public:
*/
LOG_INFO* current_linfo;
NET* slave_net; // network connection from slave -> m.
+
+ /*
+ Used to update global user stats. The global user stats are updated
+ occasionally with the 'diff' variables. After the update, the 'diff'
+ variables are reset to 0.
+ */
+ /* Time when the current thread connected to MySQL. */
+ time_t current_connect_time;
+ /* Last time when THD stats were updated in global_user_stats. */
+ time_t last_global_update_time;
+ /* Number of commands not reflected in global_user_stats yet. */
+ uint select_commands, update_commands, other_commands;
+ ulonglong start_cpu_time;
+ ulonglong start_bytes_received;
+
/* Used by the sys_var class to store temporary values */
union
{
@@ -1937,6 +1964,8 @@ public:
alloc_root.
*/
void init_for_queries();
+ void update_all_stats();
+ void update_stats(void);
void change_user(void);
void cleanup(void);
void cleanup_after_query();
@@ -2359,7 +2388,6 @@ private:
MEM_ROOT main_mem_root;
};
-
/** A short cut for thd->main_da.set_ok_status(). */
inline void
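The system_status_var layout above keeps all ulong counters first and moves bytes_received, bytes_sent, binlog_bytes_written, cpu_time and busy_time behind the last_system_status_var boundary, which is what lets add_to_status() sum the leading block with a single pointer loop and handle the rest explicitly. A minimal sketch of that boundary trick follows; Stats, LAST_ULONG_FIELD and add_to are invented names.

// Sketch of summing the leading ulong block of a statistics struct, with the
// remaining fields handled explicitly (Stats/LAST_ULONG_FIELD are invented).
#include <cstddef>
#include <cstdio>

struct Stats {
  unsigned long created_tmp_tables;
  unsigned long select_scan_count;
  unsigned long questions;          // last field that is part of the block
  double busy_time;                 // non-ulong fields live after the marker
};

// Everything up to and including 'questions' is summable as ulongs.
static const size_t LAST_ULONG_FIELD =
    offsetof(Stats, questions) + sizeof(unsigned long);

static void add_to(Stats *to, const Stats *from) {
  unsigned long *t = reinterpret_cast<unsigned long *>(to);
  const unsigned long *f = reinterpret_cast<const unsigned long *>(from);
  const unsigned long *end =
      reinterpret_cast<const unsigned long *>(
          reinterpret_cast<const char *>(from) + LAST_ULONG_FIELD);
  while (f != end)
    *(t++) += *(f++);
  to->busy_time += from->busy_time;  // non-ulong fields added explicitly
}

int main() {
  Stats global = {}, thread = {1, 2, 3, 0.5};
  add_to(&global, &thread);
  std::printf("questions=%lu busy_time=%.1f\n",
              global.questions, global.busy_time);
}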
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 8f02828e18c..698bebfd0f1 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -20,6 +20,13 @@
#include "mysql_priv.h"
+HASH global_user_stats, global_client_stats, global_table_stats;
+HASH global_index_stats;
+/* Protects the above global stats */
+extern pthread_mutex_t LOCK_global_user_client_stats;
+extern pthread_mutex_t LOCK_global_table_stats;
+extern pthread_mutex_t LOCK_global_index_stats;
+
#ifdef HAVE_OPENSSL
/*
Without SSL the handshake consists of one packet. This packet
@@ -459,6 +466,7 @@ check_user(THD *thd, enum enum_server_command command,
check_for_max_user_connections(thd, thd->user_connect))
{
/* The error is set in check_for_max_user_connections(). */
+ status_var_increment(denied_connections);
DBUG_RETURN(1);
}
@@ -470,6 +478,7 @@ check_user(THD *thd, enum enum_server_command command,
/* mysql_change_db() has pushed the error message. */
if (thd->user_connect)
decrease_user_connections(thd->user_connect);
+ status_var_increment(thd->status_var.access_denied_errors);
DBUG_RETURN(1);
}
}
@@ -493,6 +502,8 @@ check_user(THD *thd, enum enum_server_command command,
thd->main_security_ctx.user,
thd->main_security_ctx.host_or_ip,
passwd_len ? ER(ER_YES) : ER(ER_NO));
+ status_var_increment(thd->status_var.access_denied_errors);
+
DBUG_RETURN(1);
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
@@ -520,10 +531,14 @@ extern "C" void free_user(struct user_conn *uc)
void init_max_user_conn(void)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
- (void) hash_init(&hash_user_connections,system_charset_info,max_connections,
- 0,0,
- (hash_get_key) get_key_conn, (hash_free_key) free_user,
- 0);
+ if (hash_init(&hash_user_connections,system_charset_info,max_connections,
+ 0,0,
+ (hash_get_key) get_key_conn, (hash_free_key) free_user,
+ 0))
+ {
+ sql_print_error("Initializing hash_user_connections failed.");
+ exit(1);
+ }
#endif
}
@@ -576,6 +591,445 @@ void reset_mqh(LEX_USER *lu, bool get_them= 0)
#endif /* NO_EMBEDDED_ACCESS_CHECKS */
}
+/*****************************************************************************
+ Handle users statistics
+*****************************************************************************/
+
+/* 'mysql_system_user' is used when the user is not defined for a THD. */
+static const char mysql_system_user[]= "#mysql_system#";
+
+// Returns 'user' if it's not NULL. Returns 'mysql_system_user' otherwise.
+static const char * get_valid_user_string(char* user)
+{
+ return user ? user : mysql_system_user;
+}
+
+/*
+ Returns a string with the IP of the client side of the connection
+ represented by 'client'. Does not allocate memory. May return "".
+*/
+
+static const char *get_client_host(THD *client)
+{
+ return client->security_ctx->host_or_ip[0] ?
+ client->security_ctx->host_or_ip :
+ client->security_ctx->host ? client->security_ctx->host : "";
+}
+
+extern "C" uchar *get_key_user_stats(USER_STATS *user_stats, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= user_stats->user_name_length;
+ return (uchar*) user_stats->user;
+}
+
+void free_user_stats(USER_STATS* user_stats)
+{
+ my_free(user_stats, MYF(0));
+}
+
+void init_user_stats(USER_STATS *user_stats,
+ const char *user,
+ size_t user_length,
+ const char *priv_user,
+ uint total_connections,
+ uint concurrent_connections,
+ time_t connected_time,
+ double busy_time,
+ double cpu_time,
+ ulonglong bytes_received,
+ ulonglong bytes_sent,
+ ulonglong binlog_bytes_written,
+ ha_rows rows_sent,
+ ha_rows rows_read,
+ ha_rows rows_inserted,
+ ha_rows rows_deleted,
+ ha_rows rows_updated,
+ ulonglong select_commands,
+ ulonglong update_commands,
+ ulonglong other_commands,
+ ulonglong commit_trans,
+ ulonglong rollback_trans,
+ ulonglong denied_connections,
+ ulonglong lost_connections,
+ ulonglong access_denied_errors,
+ ulonglong empty_queries)
+{
+ DBUG_ENTER("init_user_stats");
+ DBUG_PRINT("enter", ("user: %s priv_user: %s", user, priv_user));
+
+ user_length= min(user_length, sizeof(user_stats->user)-1);
+ memcpy(user_stats->user, user, user_length);
+ user_stats->user[user_length]= 0;
+ user_stats->user_name_length= user_length;
+ strmake(user_stats->priv_user, priv_user, sizeof(user_stats->priv_user)-1);
+
+ user_stats->total_connections= total_connections;
+ user_stats->concurrent_connections= concurrent_connections;
+ user_stats->connected_time= connected_time;
+ user_stats->busy_time= busy_time;
+ user_stats->cpu_time= cpu_time;
+ user_stats->bytes_received= bytes_received;
+ user_stats->bytes_sent= bytes_sent;
+ user_stats->binlog_bytes_written= binlog_bytes_written;
+ user_stats->rows_sent= rows_sent;
+ user_stats->rows_updated= rows_updated;
+ user_stats->rows_read= rows_read;
+ user_stats->select_commands= select_commands;
+ user_stats->update_commands= update_commands;
+ user_stats->other_commands= other_commands;
+ user_stats->commit_trans= commit_trans;
+ user_stats->rollback_trans= rollback_trans;
+ user_stats->denied_connections= denied_connections;
+ user_stats->lost_connections= lost_connections;
+ user_stats->access_denied_errors= access_denied_errors;
+ user_stats->empty_queries= empty_queries;
+ DBUG_VOID_RETURN;
+}
+
+
+#ifdef COMPLEAT_PATCH_NOT_ADDED_YET
+
+void add_user_stats(USER_STATS *user_stats,
+ uint total_connections,
+ uint concurrent_connections,
+ time_t connected_time,
+ double busy_time,
+ double cpu_time,
+ ulonglong bytes_received,
+ ulonglong bytes_sent,
+ ulonglong binlog_bytes_written,
+ ha_rows rows_sent,
+ ha_rows rows_read,
+ ha_rows rows_inserted,
+ ha_rows rows_deleted,
+ ha_rows rows_updated,
+ ulonglong select_commands,
+ ulonglong update_commands,
+ ulonglong other_commands,
+ ulonglong commit_trans,
+ ulonglong rollback_trans,
+ ulonglong denied_connections,
+ ulonglong lost_connections,
+ ulonglong access_denied_errors,
+ ulonglong empty_queries)
+{
+ user_stats->total_connections+= total_connections;
+ user_stats->concurrent_connections+= concurrent_connections;
+ user_stats->connected_time+= connected_time;
+ user_stats->busy_time+= busy_time;
+ user_stats->cpu_time+= cpu_time;
+ user_stats->bytes_received+= bytes_received;
+ user_stats->bytes_sent+= bytes_sent;
+ user_stats->binlog_bytes_written+= binlog_bytes_written;
+ user_stats->rows_sent+= rows_sent;
+ user_stats->rows_inserted+= rows_inserted;
+ user_stats->rows_deleted+= rows_deleted;
+ user_stats->rows_updated+= rows_updated;
+ user_stats->rows_read+= rows_read;
+ user_stats->select_commands+= select_commands;
+ user_stats->update_commands+= update_commands;
+ user_stats->other_commands+= other_commands;
+ user_stats->commit_trans+= commit_trans;
+ user_stats->rollback_trans+= rollback_trans;
+ user_stats->denied_connections+= denied_connections;
+ user_stats->lost_connections+= lost_connections;
+ user_stats->access_denied_errors+= access_denied_errors;
+ user_stats->empty_queries+= empty_queries;
+}
+#endif
+
+
+void init_global_user_stats(void)
+{
+ if (hash_init(&global_user_stats, system_charset_info, max_connections,
+ 0, 0, (hash_get_key) get_key_user_stats,
+ (hash_free_key)free_user_stats, 0))
+ {
+ sql_print_error("Initializing global_user_stats failed.");
+ exit(1);
+ }
+}
+
+void init_global_client_stats(void)
+{
+ if (hash_init(&global_client_stats, system_charset_info, max_connections,
+ 0, 0, (hash_get_key) get_key_user_stats,
+ (hash_free_key)free_user_stats, 0))
+ {
+ sql_print_error("Initializing global_client_stats failed.");
+ exit(1);
+ }
+}
+
+extern "C" uchar *get_key_table_stats(TABLE_STATS *table_stats, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= table_stats->table_name_length;
+ return (uchar*) table_stats->table;
+}
+
+extern "C" void free_table_stats(TABLE_STATS* table_stats)
+{
+ my_free(table_stats, MYF(0));
+}
+
+void init_global_table_stats(void)
+{
+ if (hash_init(&global_table_stats, system_charset_info, max_connections,
+ 0, 0, (hash_get_key) get_key_table_stats,
+ (hash_free_key)free_table_stats, 0)) {
+ sql_print_error("Initializing global_table_stats failed.");
+ exit(1);
+ }
+}
+
+extern "C" uchar *get_key_index_stats(INDEX_STATS *index_stats, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= index_stats->index_name_length;
+ return (uchar*) index_stats->index;
+}
+
+extern "C" void free_index_stats(INDEX_STATS* index_stats)
+{
+ my_free(index_stats, MYF(0));
+}
+
+void init_global_index_stats(void)
+{
+ if (hash_init(&global_index_stats, system_charset_info, max_connections,
+ 0, 0, (hash_get_key) get_key_index_stats,
+ (hash_free_key)free_index_stats, 0))
+ {
+ sql_print_error("Initializing global_index_stats failed.");
+ exit(1);
+ }
+}
+
+
+void free_global_user_stats(void)
+{
+ hash_free(&global_user_stats);
+}
+
+void free_global_table_stats(void)
+{
+ hash_free(&global_table_stats);
+}
+
+void free_global_index_stats(void)
+{
+ hash_free(&global_index_stats);
+}
+
+void free_global_client_stats(void)
+{
+ hash_free(&global_client_stats);
+}
+
+/*
+ Increments the global stats connection count for an entry from
+ global_client_stats or global_user_stats. Returns 0 on success
+ and 1 on error.
+*/
+
+static bool increment_count_by_name(const char *name, size_t name_length,
+ const char *role_name,
+ HASH *users_or_clients, THD *thd)
+{
+ USER_STATS *user_stats;
+
+ if (!(user_stats= (USER_STATS*) hash_search(users_or_clients, (uchar*) name,
+ name_length)))
+ {
+ /* First connection for this user or client */
+ if (!(user_stats= ((USER_STATS*)
+ my_malloc(sizeof(USER_STATS),
+ MYF(MY_WME | MY_ZEROFILL)))))
+ return TRUE; // Out of memory
+
+ init_user_stats(user_stats, name, name_length, role_name,
+ 0, 0, // connections
+ 0, 0, 0, // time
+ 0, 0, 0, // bytes sent, received and written
+ 0, 0, // Rows sent and read
+ 0, 0, 0, // rows inserted, deleted and updated
+ 0, 0, 0, // select, update and other commands
+ 0, 0, // commit and rollback trans
+ thd->status_var.access_denied_errors,
+ 0, // lost connections
+ 0, // access denied errors
+ 0); // empty queries
+
+ if (my_hash_insert(users_or_clients, (uchar*)user_stats))
+ {
+ my_free(user_stats, 0);
+ return TRUE; // Out of memory
+ }
+ }
+ user_stats->total_connections++;
+ return FALSE;
+}
+
+
+/*
+ Increments the global user and client stats connection count.
+
+ @param use_lock if true, LOCK_global_user_client_stats will be locked
+
+ @retval 0 ok
+ @retval 1 error.
+*/
+
+#ifndef EMBEDDED_LIBRARY
+static bool increment_connection_count(THD* thd, bool use_lock)
+{
+ const char *user_string= get_valid_user_string(thd->main_security_ctx.user);
+ const char *client_string= get_client_host(thd);
+ bool return_value= FALSE;
+
+ if (!thd->userstat_running)
+ return FALSE;
+
+ if (use_lock)
+ pthread_mutex_lock(&LOCK_global_user_client_stats);
+
+ if (increment_count_by_name(user_string, strlen(user_string), user_string,
+ &global_user_stats, thd))
+ {
+ return_value= TRUE;
+ goto end;
+ }
+ if (increment_count_by_name(client_string, strlen(client_string),
+ user_string, &global_client_stats, thd))
+ {
+ return_value= TRUE;
+ goto end;
+ }
+
+end:
+ if (use_lock)
+ pthread_mutex_unlock(&LOCK_global_user_client_stats);
+ return return_value;
+}
+#endif
+
+/*
+ Used to update the global user and client stats
+*/
+
+static void update_global_user_stats_with_user(THD *thd,
+ USER_STATS *user_stats,
+ time_t now)
+{
+ DBUG_ASSERT(thd->userstat_running);
+
+ user_stats->connected_time+= now - thd->last_global_update_time;
+ user_stats->busy_time+= (thd->status_var.busy_time -
+ thd->org_status_var.busy_time);
+ user_stats->cpu_time+= (thd->status_var.cpu_time -
+ thd->org_status_var.cpu_time);
+ /*
+    This is handled specially as bytes_received is incremented BEFORE
+ org_status_var is copied.
+ */
+ user_stats->bytes_received+= (thd->org_status_var.bytes_received-
+ thd->start_bytes_received);
+ user_stats->bytes_sent+= (thd->status_var.bytes_sent -
+ thd->org_status_var.bytes_sent);
+ user_stats->binlog_bytes_written+=
+ (thd->status_var.binlog_bytes_written -
+ thd->org_status_var.binlog_bytes_written);
+ user_stats->rows_read+= (thd->status_var.rows_read -
+ thd->org_status_var.rows_read);
+ user_stats->rows_sent+= (thd->status_var.rows_sent -
+ thd->org_status_var.rows_sent);
+ user_stats->rows_inserted+= (thd->status_var.ha_write_count -
+ thd->org_status_var.ha_write_count);
+ user_stats->rows_deleted+= (thd->status_var.ha_delete_count -
+ thd->org_status_var.ha_delete_count);
+ user_stats->rows_updated+= (thd->status_var.ha_update_count -
+ thd->org_status_var.ha_update_count);
+ user_stats->select_commands+= thd->select_commands;
+ user_stats->update_commands+= thd->update_commands;
+ user_stats->other_commands+= thd->other_commands;
+ user_stats->commit_trans+= (thd->status_var.ha_commit_count -
+ thd->org_status_var.ha_commit_count);
+ user_stats->rollback_trans+= (thd->status_var.ha_rollback_count +
+ thd->status_var.ha_savepoint_rollback_count -
+ thd->org_status_var.ha_rollback_count -
+ thd->org_status_var.
+ ha_savepoint_rollback_count);
+ user_stats->access_denied_errors+=
+ (thd->status_var.access_denied_errors -
+ thd->org_status_var.access_denied_errors);
+ user_stats->empty_queries+= (thd->status_var.empty_queries -
+ thd->org_status_var.empty_queries);
+
+  /* The following can only contain 0 or 1; after that the connection ends */
+ user_stats->denied_connections+= thd->status_var.access_denied_errors;
+ user_stats->lost_connections+= thd->status_var.lost_connections;
+}
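The accumulation above always works on deltas: org_status_var is a snapshot of the session counters taken when the statement starts (see mysql_reset_thd_for_next_command() further down in this patch), and the difference against the live status_var is added to the per-user totals. Below is a minimal standalone sketch of that snapshot-and-delta idea; the struct and function names are hypothetical and not part of the patch.

// Standalone sketch (not server code): accumulate per-statement deltas into
// per-user totals the way update_global_user_stats_with_user() subtracts the
// org_status_var snapshot (taken at statement start) from the live counters.
#include <iostream>

struct StatusCounters { unsigned long long rows_read; unsigned long long bytes_sent; };
struct UserTotals     { unsigned long long rows_read; unsigned long long bytes_sent; };

static void add_deltas(UserTotals &totals,
                       const StatusCounters &snapshot,
                       const StatusCounters &now)
{
  totals.rows_read+=  now.rows_read  - snapshot.rows_read;
  totals.bytes_sent+= now.bytes_sent - snapshot.bytes_sent;
}

int main()
{
  StatusCounters snapshot= {100, 5000};   // copied when the statement starts
  StatusCounters now=      {142, 6200};   // counters after the statement ran
  UserTotals totals= {0, 0};
  add_deltas(totals, snapshot, now);
  std::cout << totals.rows_read << " " << totals.bytes_sent << "\n"; // 42 1200
  return 0;
}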
+
+
+/* Updates the global stats of a user or client */
+void update_global_user_stats(THD *thd, bool create_user, time_t now)
+{
+ const char *user_string, *client_string;
+ USER_STATS *user_stats;
+ size_t user_string_length, client_string_length;
+ DBUG_ASSERT(thd->userstat_running);
+
+ user_string= get_valid_user_string(thd->main_security_ctx.user);
+ user_string_length= strlen(user_string);
+ client_string= get_client_host(thd);
+ client_string_length= strlen(client_string);
+
+ pthread_mutex_lock(&LOCK_global_user_client_stats);
+
+ // Update by user name
+ if ((user_stats= (USER_STATS*) hash_search(&global_user_stats,
+ (uchar*) user_string,
+ user_string_length)))
+ {
+ /* Found user. */
+ update_global_user_stats_with_user(thd, user_stats, now);
+ }
+ else
+ {
+ /* Create the entry */
+ if (create_user)
+ {
+ increment_count_by_name(user_string, user_string_length, user_string,
+ &global_user_stats, thd);
+ }
+ }
+
+ /* Update by client IP */
+ if ((user_stats= (USER_STATS*)hash_search(&global_client_stats,
+ (uchar*) client_string,
+ client_string_length)))
+ {
+ // Found by client IP
+ update_global_user_stats_with_user(thd, user_stats, now);
+ }
+ else
+ {
+ // Create the entry
+ if (create_user)
+ {
+ increment_count_by_name(client_string, client_string_length,
+ user_string, &global_client_stats, thd);
+ }
+ }
+ /* Reset variables only used for counting */
+ thd->select_commands= thd->update_commands= thd->other_commands= 0;
+ thd->last_global_update_time= now;
+
+ pthread_mutex_unlock(&LOCK_global_user_client_stats);
+}
+
void thd_init_client_charset(THD *thd, uint cs_number)
{
@@ -970,6 +1424,14 @@ bool login_connection(THD *thd)
/* Connect completed, set read/write timeouts back to default */
my_net_set_read_timeout(net, thd->variables.net_read_timeout);
my_net_set_write_timeout(net, thd->variables.net_write_timeout);
+
+ /* Updates global user connection stats. */
+ if (increment_connection_count(thd, TRUE))
+ {
+ net_send_error(thd, ER_OUTOFMEMORY); // Out of memory
+ DBUG_RETURN(1);
+ }
+
DBUG_RETURN(0);
}
@@ -991,6 +1453,7 @@ void end_connection(THD *thd)
if (thd->killed || (net->error && net->vio != 0))
{
statistic_increment(aborted_threads,&LOCK_status);
+ status_var_increment(thd->status_var.lost_connections);
}
if (net->error && net->vio != 0)
@@ -1117,10 +1580,14 @@ pthread_handler_t handle_one_connection(void *arg)
for (;;)
{
NET *net= &thd->net;
+ bool create_user= TRUE;
lex_start(thd);
if (login_connection(thd))
+ {
+ create_user= FALSE;
goto end_thread;
+ }
prepare_new_connection_state(thd);
@@ -1134,12 +1601,14 @@ pthread_handler_t handle_one_connection(void *arg)
end_thread:
close_connection(thd, 0, 1);
+ if (thd->userstat_running)
+ update_global_user_stats(thd, create_user, time(NULL));
+
if (thd->scheduler->end_thread(thd,1))
return 0; // Probably no-threads
/*
- If end_thread() returns, we are either running with
- thread-handler=no-threads or this thread has been schedule to
+    If end_thread() returns, this thread has been scheduled to
handle the next connection.
*/
thd= current_thd;
diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc
index 6f61dc40f66..71c28f425d4 100644
--- a/sql/sql_cursor.cc
+++ b/sql/sql_cursor.cc
@@ -655,7 +655,7 @@ void Materialized_cursor::fetch(ulong num_rows)
result->begin_dataset();
for (fetch_limit+= num_rows; fetch_count < fetch_limit; fetch_count++)
{
- if ((res= table->file->rnd_next(table->record[0])))
+ if ((res= table->file->ha_rnd_next(table->record[0])))
break;
/* Send data only if the read was successful. */
result->send_data(item_list);
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 3bbf4b78d07..2f3ce99ab9c 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -562,8 +562,8 @@ retry:
if (table->file->inited != handler::NONE)
{
error=keyname ?
- table->file->index_next(table->record[0]) :
- table->file->rnd_next(table->record[0]);
+ table->file->ha_index_next(table->record[0]) :
+ table->file->ha_rnd_next(table->record[0]);
break;
}
/* else fall through */
@@ -572,13 +572,13 @@ retry:
{
table->file->ha_index_or_rnd_end();
table->file->ha_index_init(keyno, 1);
- error= table->file->index_first(table->record[0]);
+ error= table->file->ha_index_first(table->record[0]);
}
else
{
table->file->ha_index_or_rnd_end();
if (!(error= table->file->ha_rnd_init(1)))
- error= table->file->rnd_next(table->record[0]);
+ error= table->file->ha_rnd_next(table->record[0]);
}
mode=RNEXT;
break;
@@ -586,7 +586,7 @@ retry:
DBUG_ASSERT(keyname != 0);
if (table->file->inited != handler::NONE)
{
- error=table->file->index_prev(table->record[0]);
+ error=table->file->ha_index_prev(table->record[0]);
break;
}
/* else fall through */
@@ -594,13 +594,13 @@ retry:
DBUG_ASSERT(keyname != 0);
table->file->ha_index_or_rnd_end();
table->file->ha_index_init(keyno, 1);
- error= table->file->index_last(table->record[0]);
+ error= table->file->ha_index_last(table->record[0]);
mode=RPREV;
break;
case RNEXT_SAME:
/* Continue scan on "(keypart1,keypart2,...)=(c1, c2, ...) */
DBUG_ASSERT(keyname != 0);
- error= table->file->index_next_same(table->record[0], key, key_len);
+ error= table->file->ha_index_next_same(table->record[0], key, key_len);
break;
case RKEY:
{
@@ -640,8 +640,8 @@ retry:
table->file->ha_index_or_rnd_end();
table->file->ha_index_init(keyno, 1);
key_copy(key, table->record[0], table->key_info + keyno, key_len);
- error= table->file->index_read_map(table->record[0],
- key, keypart_map, ha_rkey_mode);
+ error= table->file->ha_index_read_map(table->record[0],
+ key, keypart_map, ha_rkey_mode);
mode=rkey_to_rnext[(int)ha_rkey_mode];
break;
}
@@ -663,6 +663,8 @@ retry:
}
goto ok;
}
+ /* Generate values for virtual fields */
+ update_virtual_fields(table);
if (cond && !cond->val_int())
continue;
if (num_rows >= offset_limit_cnt)
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index 7eff6a5e0fa..5658a3578ab 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -294,13 +294,13 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
rkey_id->store((longlong) key_id, TRUE);
rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW);
- int key_res= relations->file->index_read_map(relations->record[0],
- buff, (key_part_map) 1,
- HA_READ_KEY_EXACT);
+ int key_res= relations->file->ha_index_read_map(relations->record[0],
+ buff, (key_part_map) 1,
+ HA_READ_KEY_EXACT);
for ( ;
!key_res && key_id == (int16) rkey_id->val_int() ;
- key_res= relations->file->index_next(relations->record[0]))
+ key_res= relations->file->ha_index_next(relations->record[0]))
{
uchar topic_id_buff[8];
longlong topic_id= rtopic_id->val_int();
@@ -308,8 +308,8 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations,
field->store((longlong) topic_id, TRUE);
field->get_key_image(topic_id_buff, field->pack_length(), Field::itRAW);
- if (!topics->file->index_read_map(topics->record[0], topic_id_buff,
- (key_part_map)1, HA_READ_KEY_EXACT))
+ if (!topics->file->ha_index_read_map(topics->record[0], topic_id_buff,
+ (key_part_map)1, HA_READ_KEY_EXACT))
{
memorize_variant_topic(thd,topics,count,find_fields,
names,name,description,example);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 937576f4a68..278d0485c45 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -280,6 +280,9 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
}
}
}
+ /* Mark virtual columns used in the insert statement */
+ if (table->vfield)
+ table->mark_virtual_columns_for_write();
// For the values we need select_priv
#ifndef NO_EMBEDDED_ACCESS_CHECKS
table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege);
@@ -1425,7 +1428,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
goto err;
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
- if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
+ if (table->file->ha_rnd_pos(table->record[1],table->file->dup_ref))
goto err;
}
else
@@ -1446,9 +1449,10 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
}
}
key_copy((uchar*) key,table->record[0],table->key_info+key_nr,0);
- if ((error=(table->file->index_read_idx_map(table->record[1],key_nr,
- (uchar*) key, HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))))
+ if ((error= (table->file->ha_index_read_idx_map(table->record[1],
+ key_nr, (uchar*) key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))))
goto err;
}
if (info->handle_duplicates == DUP_UPDATE)
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 6d047197992..7b628c23156 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -340,6 +340,7 @@ void lex_start(THD *thd)
lex->reset_query_tables_list(FALSE);
lex->expr_allows_subselect= TRUE;
lex->use_only_table_context= FALSE;
+ lex->parse_vcol_expr= FALSE;
lex->name.str= 0;
lex->name.length= 0;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 5e37e618250..e67be64c847 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -118,6 +118,8 @@ enum enum_sql_command {
SQLCOM_SHOW_CREATE_TRIGGER,
SQLCOM_ALTER_DB_UPGRADE,
SQLCOM_SHOW_PROFILE, SQLCOM_SHOW_PROFILES,
+ SQLCOM_SHOW_USER_STATS, SQLCOM_SHOW_TABLE_STATS, SQLCOM_SHOW_INDEX_STATS,
+ SQLCOM_SHOW_CLIENT_STATS,
/*
When a command is added here, be sure it's also added in mysqld.cc
@@ -1544,6 +1546,7 @@ typedef struct st_lex : public Query_tables_list
LEX_USER *grant_user;
XID *xid;
THD *thd;
+ Virtual_column_info *vcol_info;
/* maintain a list of used plugins for this LEX */
DYNAMIC_ARRAY plugins;
@@ -1626,6 +1629,14 @@ typedef struct st_lex : public Query_tables_list
syntax error back.
*/
bool expr_allows_subselect;
+ /*
+ A special command "PARSE_VCOL_EXPR" is defined for the parser
+ to translate a defining expression of a virtual column into an
+ Item object.
+    The following flag is used to prevent other applications from using
+ this command.
+ */
+ bool parse_vcol_expr;
thr_lock_type lock_option;
enum SSL_type ssl_type; /* defined in violite.h */
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index d3eac03efa0..739ac692772 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -338,10 +338,14 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_SHOW_CREATE_EVENT]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PROFILES]= CF_STATUS_COMMAND;
sql_command_flags[SQLCOM_SHOW_PROFILE]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_CLIENT_STATS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_USER_STATS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_TABLE_STATS]= CF_STATUS_COMMAND;
+ sql_command_flags[SQLCOM_SHOW_INDEX_STATS]= CF_STATUS_COMMAND;
- sql_command_flags[SQLCOM_SHOW_TABLES]= (CF_STATUS_COMMAND |
- CF_SHOW_TABLE_COMMAND |
- CF_REEXECUTION_FRAGILE);
+ sql_command_flags[SQLCOM_SHOW_TABLES]= (CF_STATUS_COMMAND |
+ CF_SHOW_TABLE_COMMAND |
+ CF_REEXECUTION_FRAGILE);
sql_command_flags[SQLCOM_SHOW_TABLE_STATUS]= (CF_STATUS_COMMAND |
CF_SHOW_TABLE_COMMAND |
CF_REEXECUTION_FRAGILE);
@@ -571,7 +575,6 @@ end:
return 0;
}
-
/**
@brief Check access privs for a MERGE table and fix children lock types.
@@ -823,6 +826,8 @@ bool do_command(THD *thd)
net_new_transaction(net);
+ /* Save for user statistics */
+ thd->start_bytes_received= thd->status_var.bytes_received;
packet_length= my_net_read(net);
#if defined(ENABLED_PROFILING) && defined(COMMUNITY_SERVER)
thd->profiling.start_new_query();
@@ -1346,7 +1351,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
table_list.select_lex= &(thd->lex->select_lex);
lex_start(thd);
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, opt_userstat_running);
thd->lex->
select_lex.table_list.link_in_list((uchar*) &table_list,
@@ -1652,6 +1657,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* Free tables */
close_thread_tables(thd);
+ /* Update status; Must be done after close_thread_tables */
+ thd->update_all_stats();
+
log_slow_statement(thd);
thd_proc_info(thd, "cleaning up");
@@ -1821,6 +1829,12 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
thd->profiling.discard_current_query();
#endif
break;
+ case SCH_USER_STATS:
+ case SCH_CLIENT_STATS:
+ if (check_global_access(thd, SUPER_ACL | PROCESS_ACL))
+ DBUG_RETURN(1);
+ case SCH_TABLE_STATS:
+ case SCH_INDEX_STATS:
case SCH_OPEN_TABLES:
case SCH_VARIABLES:
case SCH_STATUS:
@@ -2262,6 +2276,10 @@ mysql_execute_command(THD *thd)
case SQLCOM_SHOW_COLLATIONS:
case SQLCOM_SHOW_STORAGE_ENGINES:
case SQLCOM_SHOW_PROFILE:
+ case SQLCOM_SHOW_CLIENT_STATS:
+ case SQLCOM_SHOW_USER_STATS:
+ case SQLCOM_SHOW_TABLE_STATS:
+ case SQLCOM_SHOW_INDEX_STATS:
case SQLCOM_SELECT:
thd->status_var.last_query_cost= 0.0;
if (all_tables)
@@ -5103,6 +5121,10 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
delete result;
}
}
+ /* Count number of empty select queries */
+ if (!thd->sent_row_count)
+ status_var_increment(thd->status_var.empty_queries);
+ status_var_add(thd->status_var.rows_sent, thd->sent_row_count);
return res;
}
@@ -5262,6 +5284,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
if (!no_errors)
{
const char *db_name= db ? db : thd->db;
+ status_var_increment(thd->status_var.access_denied_errors);
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user, sctx->priv_host, db_name);
}
@@ -5294,12 +5317,15 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
{ // We can never grant this
DBUG_PRINT("error",("No possible access"));
if (!no_errors)
+ {
+ status_var_increment(thd->status_var.access_denied_errors);
my_error(ER_ACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user,
sctx->priv_host,
(thd->password ?
ER(ER_YES) :
ER(ER_NO))); /* purecov: tested */
+ }
DBUG_RETURN(TRUE); /* purecov: tested */
}
@@ -5325,11 +5351,14 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv,
DBUG_PRINT("error",("Access denied"));
if (!no_errors)
+ {
+ status_var_increment(thd->status_var.access_denied_errors);
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user, sctx->priv_host,
(db ? db : (thd->db ?
thd->db :
"unknown"))); /* purecov: tested */
+ }
DBUG_RETURN(TRUE); /* purecov: tested */
}
@@ -5358,6 +5387,7 @@ static bool check_show_access(THD *thd, TABLE_LIST *table)
if (!thd->col_access && check_grant_db(thd, dst_db_name))
{
+ status_var_increment(thd->status_var.access_denied_errors);
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
thd->security_ctx->priv_user,
thd->security_ctx->priv_host,
@@ -5420,14 +5450,14 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
{
TABLE_LIST *org_tables= tables;
TABLE_LIST *first_not_own_table= thd->lex->first_not_own_table();
- uint i= 0;
Security_context *sctx= thd->security_ctx, *backup_ctx= thd->security_ctx;
+ uint i;
/*
The check that first_not_own_table is not reached is for the case when
the given table list refers to the list for prelocking (contains tables
of other queries). For simple queries first_not_own_table is 0.
*/
- for (; i < number && tables != first_not_own_table;
+ for (i=0; i < number && tables != first_not_own_table;
tables= tables->next_global, i++)
{
if (tables->security_ctx)
@@ -5439,9 +5469,12 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables,
(want_access & ~(SELECT_ACL | EXTRA_ACL | FILE_ACL)))
{
if (!no_errors)
+ {
+ status_var_increment(thd->status_var.access_denied_errors);
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user, sctx->priv_host,
INFORMATION_SCHEMA_NAME.str);
+ }
return TRUE;
}
/*
@@ -5605,6 +5638,7 @@ bool check_global_access(THD *thd, ulong want_access)
return 0;
get_privilege_desc(command, sizeof(command), want_access);
my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command);
+ status_var_increment(thd->status_var.access_denied_errors);
return 1;
#else
return 0;
@@ -5708,7 +5742,7 @@ bool my_yyoverflow(short **yyss, YYSTYPE **yyvs, ulong *yystacksize)
Call it after we use THD for queries, not before.
*/
-void mysql_reset_thd_for_next_command(THD *thd)
+void mysql_reset_thd_for_next_command(THD *thd, my_bool calculate_userstat)
{
DBUG_ENTER("mysql_reset_thd_for_next_command");
DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */
@@ -5753,6 +5787,15 @@ void mysql_reset_thd_for_next_command(THD *thd)
thd->total_warn_count=0; // Warnings for this query
thd->rand_used= 0;
thd->sent_row_count= thd->examined_row_count= 0;
+
+ /* Copy data for user stats */
+ if ((thd->userstat_running= calculate_userstat))
+ {
+ thd->start_cpu_time= my_getcputime();
+ memcpy(&thd->org_status_var, &thd->status_var, sizeof(thd->status_var));
+ thd->select_commands= thd->update_commands= thd->other_commands= 0;
+ }
+
thd->query_plan_flags= QPLAN_INIT;
thd->query_plan_fsort_passes= 0;
@@ -5951,7 +5994,6 @@ void mysql_parse(THD *thd, const char *inBuf, uint length,
const char ** found_semicolon)
{
DBUG_ENTER("mysql_parse");
-
DBUG_EXECUTE_IF("parser_debug", turn_parser_debug_on(););
/*
@@ -5971,7 +6013,7 @@ void mysql_parse(THD *thd, const char *inBuf, uint length,
FIXME: cleanup the dependencies in the code to simplify this.
*/
lex_start(thd);
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, opt_userstat_running);
if (query_cache_send_result_to_client(thd, (char*) inBuf, length) <= 0)
{
@@ -6044,10 +6086,11 @@ void mysql_parse(THD *thd, const char *inBuf, uint length,
}
else
{
+      /* The query was served from the query cache; update statistics as for a SELECT */
+ thd->lex->sql_command= SQLCOM_SELECT;
/* There are no multi queries in the cache. */
*found_semicolon= NULL;
}
-
DBUG_VOID_RETURN;
}
@@ -6071,7 +6114,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length)
Parser_state parser_state(thd, inBuf, length);
lex_start(thd);
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, 0);
if (!parse_sql(thd, & parser_state, NULL) &&
all_tables_not_ok(thd,(TABLE_LIST*) lex->select_lex.table_list.first))
@@ -6098,7 +6141,8 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
LEX_STRING *comment,
char *change,
List<String> *interval_list, CHARSET_INFO *cs,
- uint uint_geom_type)
+ uint uint_geom_type,
+ Virtual_column_info *vcol_info)
{
register Create_field *new_field;
LEX *lex= thd->lex;
@@ -6184,7 +6228,7 @@ bool add_field_to_list(THD *thd, LEX_STRING *field_name, enum_field_types type,
if (!(new_field= new Create_field()) ||
new_field->init(thd, field_name->str, type, length, decimals, type_modifier,
default_value, on_update_value, comment, change,
- interval_list, cs, uint_geom_type))
+ interval_list, cs, uint_geom_type, vcol_info))
DBUG_RETURN(1);
lex->alter_info.create_list.push_back(new_field);
@@ -6911,6 +6955,13 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
if (flush_error_log())
result=1;
}
+ if (((options & (REFRESH_SLOW_QUERY_LOG | REFRESH_LOG)) ==
+ REFRESH_SLOW_QUERY_LOG))
+ {
+    /* We are only flushing the slow query log */
+ logger.flush_slow_log(thd);
+ }
+
#ifdef HAVE_QUERY_CACHE
if (options & REFRESH_QUERY_CACHE_FREE)
{
@@ -6993,26 +7044,55 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables,
}
#endif
#ifdef OPENSSL
- if (options & REFRESH_DES_KEY_FILE)
- {
- if (des_key_file && load_des_key_file(des_key_file))
- result= 1;
- }
+ if (options & REFRESH_DES_KEY_FILE)
+ {
+ if (des_key_file && load_des_key_file(des_key_file))
+ result= 1;
+ }
#endif
#ifdef HAVE_REPLICATION
- if (options & REFRESH_SLAVE)
- {
- tmp_write_to_binlog= 0;
- pthread_mutex_lock(&LOCK_active_mi);
- if (reset_slave(thd, active_mi))
- result=1;
- pthread_mutex_unlock(&LOCK_active_mi);
- }
+ if (options & REFRESH_SLAVE)
+ {
+ tmp_write_to_binlog= 0;
+ pthread_mutex_lock(&LOCK_active_mi);
+ if (reset_slave(thd, active_mi))
+ result=1;
+ pthread_mutex_unlock(&LOCK_active_mi);
+ }
#endif
- if (options & REFRESH_USER_RESOURCES)
- reset_mqh((LEX_USER *) NULL, 0); /* purecov: inspected */
- *write_to_binlog= tmp_write_to_binlog;
- return result;
+ if (options & REFRESH_USER_RESOURCES)
+ reset_mqh((LEX_USER *) NULL, 0); /* purecov: inspected */
+ if (options & REFRESH_TABLE_STATS)
+ {
+ pthread_mutex_lock(&LOCK_global_table_stats);
+ free_global_table_stats();
+ init_global_table_stats();
+ pthread_mutex_unlock(&LOCK_global_table_stats);
+ }
+ if (options & REFRESH_INDEX_STATS)
+ {
+ pthread_mutex_lock(&LOCK_global_index_stats);
+ free_global_index_stats();
+ init_global_index_stats();
+ pthread_mutex_unlock(&LOCK_global_index_stats);
+ }
+ if (options & (REFRESH_USER_STATS | REFRESH_CLIENT_STATS))
+ {
+ pthread_mutex_lock(&LOCK_global_user_client_stats);
+ if (options & REFRESH_USER_STATS)
+ {
+ free_global_user_stats();
+ init_global_user_stats();
+ }
+ if (options & REFRESH_CLIENT_STATS)
+ {
+ free_global_client_stats();
+ init_global_client_stats();
+ }
+ pthread_mutex_unlock(&LOCK_global_user_client_stats);
+ }
+ *write_to_binlog= tmp_write_to_binlog;
+ return result;
}
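Each of the new FLUSH ... STATISTICS branches above follows the same pattern: take the mutex that guards the hash, free it, re-initialise it, release the mutex. The following is a standalone sketch of that reset-under-lock pattern, using standard C++ primitives rather than the server's pthread mutexes and HASH; it is illustrative only and not part of the patch.

// Standalone sketch (not server code): reset a shared statistics container
// under its lock, mirroring the free_*/init_* pairs that the new
// FLUSH ... STATISTICS options run while holding the corresponding mutex.
#include <mutex>
#include <string>
#include <unordered_map>

static std::mutex stats_mutex;   // stands in for LOCK_global_table_stats
static std::unordered_map<std::string, unsigned long> table_stats;

static void flush_table_stats_sketch()
{
  std::lock_guard<std::mutex> guard(stats_mutex);
  table_stats.clear();           // free + re-init collapsed into one step
}

int main()
{
  table_stats["test.t1"]= 42;
  flush_table_stats_sketch();
  return table_stats.empty() ? 0 : 1;   // 0: the stats are gone after the flush
}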
@@ -7048,7 +7128,6 @@ uint kill_one_thread(THD *thd, ulong id, bool only_kill_query)
VOID(pthread_mutex_unlock(&LOCK_thread_count));
if (tmp)
{
-
/*
If we're SUPER, we can KILL anything, including system-threads.
No further checks.
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 2b589c0c1b2..076ff96ca88 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -975,7 +975,8 @@ bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
thd->lex->use_only_table_context= TRUE;
thd->lex->current_select->cur_pos_in_select_list= UNDEF_POS;
- error= func_expr->fix_fields(thd, (Item**)&func_expr);
+ if (!(error= func_expr->fix_fields(thd, (Item**)&func_expr)))
+ func_expr->walk(&Item::vcol_in_partition_func_processor, 0, NULL);
thd->lex->use_only_table_context= save_use_only_table_context;
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 37e5a583840..646cb2e8563 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -1782,10 +1782,10 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name)
table->use_all_columns();
table->field[0]->store(name->str, name->length, system_charset_info);
- if (! table->file->index_read_idx_map(table->record[0], 0,
- (uchar *)table->field[0]->ptr,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (! table->file->ha_index_read_idx_map(table->record[0], 0,
+ (uchar *)table->field[0]->ptr,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{
int error;
/*
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index ead31436dc7..56f8311e1f6 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -2067,14 +2067,13 @@ void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length)
Prepared_statement *stmt;
bool error;
DBUG_ENTER("mysqld_stmt_prepare");
-
DBUG_PRINT("prep_query", ("%s", packet));
/* First of all clear possible warnings from the previous command */
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, opt_userstat_running);
if (! (stmt= new Prepared_statement(thd)))
- DBUG_VOID_RETURN; /* out of memory: error is set in Sql_alloc */
+ goto end; /* out of memory: error is set in Sql_alloc */
if (thd->stmt_map.insert(thd, stmt))
{
@@ -2082,7 +2081,7 @@ void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length)
The error is set in the insert. The statement itself
will be also deleted there (this is how the hash works).
*/
- DBUG_VOID_RETURN;
+ goto end;
}
/* Reset warnings from previous command */
@@ -2109,6 +2108,7 @@ void mysqld_stmt_prepare(THD *thd, const char *packet, uint packet_length)
thd->protocol= save_protocol;
/* check_prepared_statemnt sends the metadata packet in case of success */
+end:
DBUG_VOID_RETURN;
}
@@ -2453,7 +2453,7 @@ void mysqld_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
packet+= 9; /* stmt_id + 5 bytes of flags */
/* First of all clear possible warnings from the previous command */
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, opt_userstat_running);
if (!(stmt= find_prepared_statement(thd, stmt_id)))
{
@@ -2552,7 +2552,8 @@ void mysqld_stmt_fetch(THD *thd, char *packet, uint packet_length)
DBUG_ENTER("mysqld_stmt_fetch");
/* First of all clear possible warnings from the previous command */
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, opt_userstat_running);
+
status_var_increment(thd->status_var.com_stmt_fetch);
if (!(stmt= find_prepared_statement(thd, stmt_id)))
{
@@ -2618,7 +2619,7 @@ void mysqld_stmt_reset(THD *thd, char *packet)
DBUG_ENTER("mysqld_stmt_reset");
/* First of all clear possible warnings from the previous command */
- mysql_reset_thd_for_next_command(thd);
+ mysql_reset_thd_for_next_command(thd, opt_userstat_running);
status_var_increment(thd->status_var.com_stmt_reset);
if (!(stmt= find_prepared_statement(thd, stmt_id)))
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 5f12bb3e8b5..014402733c7 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -9824,6 +9824,10 @@ void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
bitmap_init(&table->tmp_set,
(my_bitmap_map*) (bitmaps+ bitmap_buffer_size(field_count)),
field_count, FALSE);
+ bitmap_init(&table->vcol_set,
+              (my_bitmap_map*) (bitmaps+ 2*bitmap_buffer_size(field_count)),
+ field_count, FALSE);
+
/* write_set and all_set are copies of read_set */
table->def_write_set= table->def_read_set;
table->s->all_set= table->def_read_set;
@@ -9972,7 +9976,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
&tmpname, (uint) strlen(tmp_table_name)+1,
&group_buff, (group && ! using_unique_constraint ?
param->group_length : 0),
- &bitmaps, bitmap_buffer_size(field_count)*2,
+ &bitmaps, bitmap_buffer_size(field_count)*3,
NullS))
{
if (temp_pool_slot != MY_BIT_NONE)
@@ -10577,7 +10581,7 @@ TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list)
&share, sizeof(*share),
&field, (field_count + 1) * sizeof(Field*),
&blob_field, (field_count+1) *sizeof(uint),
- &bitmaps, bitmap_buffer_size(field_count)*2,
+ &bitmaps, bitmap_buffer_size(field_count)*3,
NullS))
return 0;
@@ -10668,8 +10672,9 @@ error:
static bool open_tmp_table(TABLE *table)
{
int error;
- if ((error=table->file->ha_open(table, table->s->table_name.str,O_RDWR,
- HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
+ if ((error= table->file->ha_open(table, table->s->table_name.str, O_RDWR,
+ HA_OPEN_TMP_TABLE |
+ HA_OPEN_INTERNAL_TABLE)))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
table->db_stat=0;
@@ -11014,7 +11019,7 @@ create_internal_tmp_table_from_heap2(THD *thd, TABLE *table,
is safe as this is a temporary MyISAM table without timestamp/autoincrement
or partitioning.
*/
- while (!table->file->rnd_next(new_table.record[1]))
+ while (!table->file->ha_rnd_next(new_table.record[1]))
{
write_err= new_table.file->ha_write_row(new_table.record[1]);
DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
@@ -11548,6 +11553,8 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
}
DBUG_PRINT("info", ("select cond 0x%lx", (ulong)select_cond));
+ update_virtual_fields(join_tab->table);
+
if (select_cond)
{
select_cond_result= test(select_cond->val_int());
@@ -11766,6 +11773,8 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last)
return NESTED_LOOP_KILLED; // Aborted by user /* purecov: inspected */
}
SQL_SELECT *select=join_tab->select;
+ if (rc == NESTED_LOOP_OK)
+ update_virtual_fields(join_tab->table);
if (rc == NESTED_LOOP_OK &&
(!join_tab->cache.select || !join_tab->cache.select->skip_record()))
{
@@ -11828,10 +11837,10 @@ int safe_index_read(JOIN_TAB *tab)
{
int error;
TABLE *table= tab->table;
- if ((error=table->file->index_read_map(table->record[0],
- tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts),
- HA_READ_KEY_EXACT)))
+ if ((error= table->file->ha_index_read_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT)))
return report_error(table, error);
return 0;
}
@@ -11940,8 +11949,8 @@ join_read_system(JOIN_TAB *tab)
int error;
if (table->status & STATUS_GARBAGE) // If first read
{
- if ((error=table->file->read_first_row(table->record[0],
- table->s->primary_key)))
+ if ((error= table->file->ha_read_first_row(table->record[0],
+ table->s->primary_key)))
{
if (error != HA_ERR_END_OF_FILE)
return report_error(table, error);
@@ -11949,6 +11958,7 @@ join_read_system(JOIN_TAB *tab)
empty_record(table); // Make empty record
return -1;
}
+ update_virtual_fields(table);
store_record(table,record[1]);
}
else if (!table->status) // Only happens with left join
@@ -11983,10 +11993,10 @@ join_read_const(JOIN_TAB *tab)
error=HA_ERR_KEY_NOT_FOUND;
else
{
- error=table->file->index_read_idx_map(table->record[0],tab->ref.key,
- (uchar*) tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts),
- HA_READ_KEY_EXACT);
+ error= table->file->ha_index_read_idx_map(table->record[0],tab->ref.key,
+ (uchar*) tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT);
}
if (error)
{
@@ -11997,6 +12007,7 @@ join_read_const(JOIN_TAB *tab)
return report_error(table, error);
return -1;
}
+ update_virtual_fields(table);
store_record(table,record[1]);
}
else if (!(table->status & ~STATUS_NULL_ROW)) // Only happens with left join
@@ -12040,10 +12051,10 @@ join_read_key(JOIN_TAB *tab)
tab->read_record.file->unlock_row();
tab->ref.has_record= FALSE;
}
- error=table->file->index_read_map(table->record[0],
- tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts),
- HA_READ_KEY_EXACT);
+ error= table->file->ha_index_read_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT);
if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
@@ -12124,10 +12135,10 @@ join_read_always_key(JOIN_TAB *tab)
if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
return -1;
- if ((error=table->file->index_read_map(table->record[0],
- tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts),
- HA_READ_KEY_EXACT)))
+ if ((error= table->file->ha_index_read_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT)))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
@@ -12158,9 +12169,9 @@ join_read_last_key(JOIN_TAB *tab)
}
if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
return -1;
- if ((error=table->file->index_read_last_map(table->record[0],
- tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts))))
+ if ((error= table->file->ha_index_read_last_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts))))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
@@ -12185,9 +12196,9 @@ join_read_next_same(READ_RECORD *info)
TABLE *table= info->table;
JOIN_TAB *tab=table->reginfo.join_tab;
- if ((error=table->file->index_next_same(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length)))
+ if ((error= table->file->ha_index_next_same(table->record[0],
+ tab->ref.key_buff,
+ tab->ref.key_length)))
{
if (error != HA_ERR_END_OF_FILE)
return report_error(table, error);
@@ -12205,7 +12216,7 @@ join_read_prev_same(READ_RECORD *info)
TABLE *table= info->table;
JOIN_TAB *tab=table->reginfo.join_tab;
- if ((error=table->file->index_prev(table->record[0])))
+ if ((error= table->file->ha_index_prev(table->record[0])))
return report_error(table, error);
if (key_cmp_if_same(table, tab->ref.key_buff, tab->ref.key,
tab->ref.key_length))
@@ -12277,7 +12288,7 @@ join_read_first(JOIN_TAB *tab)
error= table->file->ha_index_init(tab->index, tab->sorted);
if (!error)
error= table->file->prepare_index_scan();
- if (error || (error=tab->table->file->index_first(tab->table->record[0])))
+ if (error || (error=tab->table->file->ha_index_first(tab->table->record[0])))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
report_error(table, error);
@@ -12291,8 +12302,9 @@ static int
join_read_next(READ_RECORD *info)
{
int error;
- if ((error=info->file->index_next(info->record)))
+ if ((error= info->file->ha_index_next(info->record)))
return report_error(info->table, error);
+
return 0;
}
@@ -12318,8 +12330,9 @@ join_read_last(JOIN_TAB *tab)
error= table->file->ha_index_init(tab->index, 1);
if (!error)
error= table->file->prepare_index_scan();
- if (error || (error= tab->table->file->index_last(tab->table->record[0])))
+ if (error || (error= tab->table->file->ha_index_last(tab->table->record[0])))
return report_error(table, error);
+
return 0;
}
@@ -12328,7 +12341,7 @@ static int
join_read_prev(READ_RECORD *info)
{
int error;
- if ((error= info->file->index_prev(info->record)))
+ if ((error= info->file->ha_index_prev(info->record)))
return report_error(info->table, error);
return 0;
}
@@ -12353,7 +12366,7 @@ join_ft_read_first(JOIN_TAB *tab)
#endif
table->file->ft_init();
- if ((error= table->file->ft_read(table->record[0])))
+ if ((error= table->file->ha_ft_read(table->record[0])))
return report_error(table, error);
return 0;
}
@@ -12362,7 +12375,7 @@ static int
join_ft_read_next(READ_RECORD *info)
{
int error;
- if ((error= info->file->ft_read(info->table->record[0])))
+ if ((error= info->file->ha_ft_read(info->table->record[0])))
return report_error(info->table, error);
return 0;
}
@@ -12654,7 +12667,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int error;
join->found_records++;
- if ((error=table->file->ha_write_row(table->record[0])))
+ if ((error= table->file->ha_write_row(table->record[0])))
{
if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
goto end;
@@ -12709,15 +12722,15 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (item->maybe_null)
group->buff[-1]= (char) group->field->is_null();
}
- if (!table->file->index_read_map(table->record[1],
- join->tmp_table_param.group_buff,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (!table->file->ha_index_read_map(table->record[1],
+ join->tmp_table_param.group_buff,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{ /* Update old record */
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error=table->file->ha_update_row(table->record[1],
- table->record[0])))
+ if ((error= table->file->ha_update_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -12740,7 +12753,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
init_tmptable_sum_functions(join->sum_funcs);
copy_funcs(join->tmp_table_param.items_to_copy);
- if ((error=table->file->ha_write_row(table->record[0])))
+ if ((error= table->file->ha_write_row(table->record[0])))
{
if (create_internal_tmp_table_from_heap(join->thd, table,
&join->tmp_table_param,
@@ -12781,7 +12794,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
copy_fields(&join->tmp_table_param); // Groups are copied twice.
copy_funcs(join->tmp_table_param.items_to_copy);
- if (!(error=table->file->ha_write_row(table->record[0])))
+ if (!(error= table->file->ha_write_row(table->record[0])))
join->send_records++; // New group
else
{
@@ -12790,15 +12803,15 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
- if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
+ if (table->file->ha_rnd_pos(table->record[1],table->file->dup_ref))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error=table->file->ha_update_row(table->record[1],
- table->record[0])))
+ if ((error= table->file->ha_update_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -14135,7 +14148,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
new_record=(char*) table->record[1]+offset;
file->ha_rnd_init(1);
- error=file->rnd_next(record);
+ error= file->ha_rnd_next(record);
for (;;)
{
if (thd->killed)
@@ -14148,7 +14161,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
{
if (error == HA_ERR_RECORD_DELETED)
{
- error= file->rnd_next(record);
+ error= file->ha_rnd_next(record);
continue;
}
if (error == HA_ERR_END_OF_FILE)
@@ -14157,9 +14170,9 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
if (having && !having->val_int())
{
- if ((error=file->ha_delete_row(record)))
+ if ((error= file->ha_delete_row(record)))
goto err;
- error=file->rnd_next(record);
+ error= file->ha_rnd_next(record);
continue;
}
if (copy_blobs(first_field))
@@ -14174,7 +14187,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
bool found=0;
for (;;)
{
- if ((error=file->rnd_next(record)))
+ if ((error= file->ha_rnd_next(record)))
{
if (error == HA_ERR_RECORD_DELETED)
continue;
@@ -14184,7 +14197,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
if (compare_record(table, first_field) == 0)
{
- if ((error=file->ha_delete_row(record)))
+ if ((error= file->ha_delete_row(record)))
goto err;
}
else if (!found)
@@ -14274,7 +14287,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
error=0;
goto err;
}
- if ((error=file->rnd_next(record)))
+ if ((error= file->ha_rnd_next(record)))
{
if (error == HA_ERR_RECORD_DELETED)
continue;
@@ -14284,7 +14297,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
}
if (having && !having->val_int())
{
- if ((error=file->ha_delete_row(record)))
+ if ((error= file->ha_delete_row(record)))
goto err;
continue;
}
@@ -14301,7 +14314,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if (hash_search(&hash, org_key_pos, key_length))
{
/* Duplicated found ; Remove the row */
- if ((error=file->ha_delete_row(record)))
+ if ((error= file->ha_delete_row(record)))
goto err;
}
else
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index f8a8dea18ff..58622699984 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -520,10 +520,10 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server)
system_charset_info);
/* read index until record is that specified in server_name */
- if ((error= table->file->index_read_idx_map(table->record[0], 0,
- (uchar *)table->field[0]->ptr,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT)))
+ if ((error= table->file->ha_index_read_idx_map(table->record[0], 0,
+ (uchar *)table->field[0]->ptr,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT)))
{
/* if not found, err */
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
@@ -863,10 +863,10 @@ update_server_record(TABLE *table, FOREIGN_SERVER *server)
server->server_name_length,
system_charset_info);
- if ((error= table->file->index_read_idx_map(table->record[0], 0,
- (uchar *)table->field[0]->ptr,
- ~(longlong)0,
- HA_READ_KEY_EXACT)))
+ if ((error= table->file->ha_index_read_idx_map(table->record[0], 0,
+ (uchar *)table->field[0]->ptr,
+ ~(longlong)0,
+ HA_READ_KEY_EXACT)))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
table->file->print_error(error, MYF(0));
@@ -920,10 +920,10 @@ delete_server_record(TABLE *table,
/* set the field that's the PK to the value we're looking for */
table->field[0]->store(server_name, server_name_length, system_charset_info);
- if ((error= table->file->index_read_idx_map(table->record[0], 0,
- (uchar *)table->field[0]->ptr,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT)))
+ if ((error= table->file->ha_index_read_idx_map(table->record[0], 0,
+ (uchar *)table->field[0]->ptr,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT)))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
table->file->print_error(error, MYF(0));
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 01d82f994e7..0e05d25af18 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -821,6 +821,7 @@ bool mysqld_show_create_db(THD *thd, char *dbname,
sctx->master_access);
if (!(db_access & DB_ACLS) && check_grant_db(thd,dbname))
{
+ status_var_increment(thd->status_var.access_denied_errors);
my_error(ER_DBACCESS_DENIED_ERROR, MYF(0),
sctx->priv_user, sctx->host_or_ip, dbname);
general_log_print(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR),
@@ -1292,6 +1293,19 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
field->sql_type(type);
packet->append(type.ptr(), type.length(), system_charset_info);
+ if (field->vcol_info)
+ {
+ packet->append(STRING_WITH_LEN(" AS ("));
+ packet->append(field->vcol_info->expr_str.str,
+ field->vcol_info->expr_str.length,
+ system_charset_info);
+ packet->append(STRING_WITH_LEN(")"));
+ if (field->stored_in_db)
+ packet->append(STRING_WITH_LEN(" PERSISTENT"));
+ else
+ packet->append(STRING_WITH_LEN(" VIRTUAL"));
+ }
+
if (field->has_charset() &&
!(thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)))
{
@@ -1322,7 +1336,8 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
packet->append(STRING_WITH_LEN(" NULL"));
}
- if (get_field_default_value(thd, table, field, &def_value, 1))
+ if (!field->vcol_info &&
+ get_field_default_value(thd, table, field, &def_value, 1))
{
packet->append(STRING_WITH_LEN(" DEFAULT "));
packet->append(def_value.ptr(), def_value.length(), system_charset_info);
@@ -2204,11 +2219,6 @@ void remove_status_vars(SHOW_VAR *list)
}
}
-inline void make_upper(char *buf)
-{
- for (; *buf; buf++)
- *buf= my_toupper(system_charset_info, *buf);
-}
static bool show_status_array(THD *thd, const char *wild,
SHOW_VAR *variables,
@@ -2247,7 +2257,7 @@ static bool show_status_array(THD *thd, const char *wild,
strnmov(prefix_end, variables->name, len);
name_buffer[sizeof(name_buffer)-1]=0; /* Safety */
if (ucase_names)
- make_upper(name_buffer);
+ my_caseup_str(system_charset_info, name_buffer);
restore_record(table, s->default_values);
table->field[0]->store(name_buffer, strlen(name_buffer),
@@ -2374,6 +2384,323 @@ end:
DBUG_RETURN(res);
}
+#ifdef COMPLEAT_PATCH_NOT_ADDED_YET
+/*
+ Aggregate values for mapped_user entries by their role.
+
+ SYNOPSIS
+ aggregate_user_stats
+ all_user_stats - input to aggregate
+ agg_user_stats - returns aggregated values
+
+ RETURN
+ 0 - OK
+ 1 - error
+*/
+
+static int aggregate_user_stats(HASH *all_user_stats, HASH *agg_user_stats)
+{
+ DBUG_ENTER("aggregate_user_stats");
+ if (hash_init(agg_user_stats, system_charset_info,
+ max(all_user_stats->records, 1),
+ 0, 0, (hash_get_key)get_key_user_stats,
+ (hash_free_key)free_user_stats, 0))
+ {
+ sql_print_error("Malloc in aggregate_user_stats failed");
+ DBUG_RETURN(1);
+ }
+
+ for (uint i= 0; i < all_user_stats->records; i++)
+ {
+ USER_STATS *user= (USER_STATS*)hash_element(all_user_stats, i);
+ USER_STATS *agg_user;
+ uint name_length= strlen(user->priv_user);
+
+ if (!(agg_user= (USER_STATS*) hash_search(agg_user_stats,
+ (uchar*)user->priv_user,
+ name_length)))
+ {
+ // First entry for this role.
+ if (!(agg_user= (USER_STATS*) my_malloc(sizeof(USER_STATS),
+ MYF(MY_WME | MY_ZEROFILL))))
+ {
+ sql_print_error("Malloc in aggregate_user_stats failed");
+ DBUG_RETURN(1);
+ }
+
+ init_user_stats(agg_user, user->priv_user, name_length,
+ user->priv_user,
+ user->total_connections, user->concurrent_connections,
+ user->connected_time, user->busy_time, user->cpu_time,
+ user->bytes_received, user->bytes_sent,
+ user->binlog_bytes_written,
+ user->rows_sent, user->rows_read,
+ user->rows_inserted, user->rows_deleted,
+ user->rows_updated,
+ user->select_commands, user->update_commands,
+ user->other_commands,
+ user->commit_trans, user->rollback_trans,
+ user->denied_connections, user->lost_connections,
+ user->access_denied_errors, user->empty_queries);
+
+ if (my_hash_insert(agg_user_stats, (uchar*) agg_user))
+ {
+ /* Out of memory */
+ my_free(agg_user, 0);
+ sql_print_error("Malloc in aggregate_user_stats failed");
+ DBUG_RETURN(1);
+ }
+ }
+ else
+ {
+ /* Aggregate with existing values for this role. */
+ add_user_stats(agg_user,
+ user->total_connections, user->concurrent_connections,
+ user->connected_time, user->busy_time, user->cpu_time,
+ user->bytes_received, user->bytes_sent,
+ user->binlog_bytes_written,
+ user->rows_sent, user->rows_read,
+ user->rows_inserted, user->rows_deleted,
+ user->rows_updated,
+ user->select_commands, user->update_commands,
+ user->other_commands,
+ user->commit_trans, user->rollback_trans,
+ user->denied_connections, user->lost_connections,
+ user->access_denied_errors, user->empty_queries);
+ }
+ }
+ DBUG_PRINT("exit", ("aggregated %lu input into %lu output entries",
+ all_user_stats->records, agg_user_stats->records));
+ DBUG_RETURN(0);
+}
+#endif
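The (currently disabled) aggregation walks every per-user entry and either creates a per-role entry on first sight or adds the counters to the existing one. Below is a standalone sketch of that create-or-accumulate grouping with made-up data; it is not part of the patch, and std::unordered_map stands in for the my_sys HASH.

// Standalone sketch (not server code): aggregate per-user entries by role,
// the create-or-accumulate pattern used by the #ifdef'd aggregate_user_stats()
// above.
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

struct PerUser
{
  std::string user;
  std::string role;
  unsigned long rows_read;
};

int main()
{
  std::vector<PerUser> users= { {"u1", "app",   10},
                                {"u2", "app",    5},
                                {"u3", "admin",  7} };
  std::unordered_map<std::string, unsigned long> per_role;
  for (const PerUser &u : users)
    per_role[u.role]+= u.rows_read;       // first sight creates a zeroed entry
  for (const auto &entry : per_role)
    std::cout << entry.first << ": " << entry.second << "\n";
  return 0;
}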
+
+/*
+ Write result to network for SHOW USER_STATISTICS
+
+ SYNOPSIS
+ send_user_stats
+      thd            - current thread
+      all_user_stats - values to return
+      table          - I_S table to fill
+
+ RETURN
+ 0 - OK
+ 1 - error
+*/
+
+int send_user_stats(THD* thd, HASH *all_user_stats, TABLE *table)
+{
+ DBUG_ENTER("send_user_stats");
+
+ for (uint i= 0; i < all_user_stats->records; i++)
+ {
+ uint j= 0;
+ USER_STATS *user_stats= (USER_STATS*) hash_element(all_user_stats, i);
+
+ table->field[j++]->store(user_stats->user, user_stats->user_name_length,
+ system_charset_info);
+ table->field[j++]->store((longlong)user_stats->total_connections,TRUE);
+ table->field[j++]->store((longlong)user_stats->concurrent_connections);
+ table->field[j++]->store((longlong)user_stats->connected_time);
+ table->field[j++]->store((double)user_stats->busy_time);
+ table->field[j++]->store((double)user_stats->cpu_time);
+ table->field[j++]->store((longlong)user_stats->bytes_received, TRUE);
+ table->field[j++]->store((longlong)user_stats->bytes_sent, TRUE);
+ table->field[j++]->store((longlong)user_stats->binlog_bytes_written, TRUE);
+ table->field[j++]->store((longlong)user_stats->rows_read, TRUE);
+ table->field[j++]->store((longlong)user_stats->rows_sent, TRUE);
+ table->field[j++]->store((longlong)user_stats->rows_deleted, TRUE);
+ table->field[j++]->store((longlong)user_stats->rows_inserted, TRUE);
+ table->field[j++]->store((longlong)user_stats->rows_updated, TRUE);
+ table->field[j++]->store((longlong)user_stats->select_commands, TRUE);
+ table->field[j++]->store((longlong)user_stats->update_commands, TRUE);
+ table->field[j++]->store((longlong)user_stats->other_commands, TRUE);
+ table->field[j++]->store((longlong)user_stats->commit_trans, TRUE);
+ table->field[j++]->store((longlong)user_stats->rollback_trans, TRUE);
+ table->field[j++]->store((longlong)user_stats->denied_connections, TRUE);
+ table->field[j++]->store((longlong)user_stats->lost_connections, TRUE);
+ table->field[j++]->store((longlong)user_stats->access_denied_errors, TRUE);
+ table->field[j++]->store((longlong)user_stats->empty_queries, TRUE);
+ if (schema_table_store_record(thd, table))
+ {
+ DBUG_PRINT("error", ("store record error"));
+ DBUG_RETURN(1);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+/*
+ Process SHOW USER_STATISTICS
+
+ SYNOPSIS
+    fill_schema_user_stats
+      thd    - current thread
+      tables - I_S table to fill
+      cond   - pushed-down condition
+
+ RETURN
+ 0 - OK
+ 1 - error
+*/
+
+int fill_schema_user_stats(THD* thd, TABLE_LIST* tables, COND* cond)
+{
+ TABLE *table= tables->table;
+ int result;
+ DBUG_ENTER("fill_schema_user_stats");
+
+ if (check_global_access(thd, SUPER_ACL | PROCESS_ACL))
+ DBUG_RETURN(1);
+
+ /*
+    Iterates through all the global user stats and sends them to the client.
+ */
+
+ pthread_mutex_lock(&LOCK_global_user_client_stats);
+ result= send_user_stats(thd, &global_user_stats, table) != 0;
+ pthread_mutex_unlock(&LOCK_global_user_client_stats);
+
+ DBUG_PRINT("exit", ("result: %d", result));
+ DBUG_RETURN(result);
+}
+
+/*
+ Process SHOW CLIENT_STATISTICS
+
+ SYNOPSIS
+    fill_schema_client_stats
+      thd    - current thread
+      tables - I_S table to fill
+      cond   - pushed-down condition
+
+ RETURN
+ 0 - OK
+ 1 - error
+*/
+
+int fill_schema_client_stats(THD* thd, TABLE_LIST* tables, COND* cond)
+{
+ TABLE *table= tables->table;
+ int result;
+ DBUG_ENTER("fill_schema_client_stats");
+
+ if (check_global_access(thd, SUPER_ACL | PROCESS_ACL))
+ DBUG_RETURN(1);
+
+ /*
+    Iterates through all the global client stats and sends them to the client.
+ */
+
+ pthread_mutex_lock(&LOCK_global_user_client_stats);
+ result= send_user_stats(thd, &global_client_stats, table) != 0;
+ pthread_mutex_unlock(&LOCK_global_user_client_stats);
+
+ DBUG_PRINT("exit", ("result: %d", result));
+ DBUG_RETURN(result);
+}
+
+
+/* Fill information schema table with table statistics */
+
+int fill_schema_table_stats(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ TABLE *table= tables->table;
+ DBUG_ENTER("fill_schema_table_stats");
+
+ pthread_mutex_lock(&LOCK_global_table_stats);
+ for (uint i= 0; i < global_table_stats.records; i++)
+ {
+ char *end_of_schema;
+ TABLE_STATS *table_stats=
+ (TABLE_STATS*)hash_element(&global_table_stats, i);
+ TABLE_LIST tmp_table;
+ size_t schema_length, table_name_length;
+
+ end_of_schema= strend(table_stats->table);
+ schema_length= (size_t) (end_of_schema - table_stats->table);
+ table_name_length= strlen(table_stats->table + schema_length + 1);
+
+ bzero((char*) &tmp_table,sizeof(tmp_table));
+ tmp_table.db= table_stats->table;
+ tmp_table.table_name= end_of_schema+1;
+ tmp_table.grant.privilege= 0;
+ if (check_access(thd, SELECT_ACL | EXTRA_ACL, tmp_table.db,
+ &tmp_table.grant.privilege, 0, 0,
+ is_schema_db(tmp_table.db)) ||
+ check_grant(thd, SELECT_ACL, &tmp_table, 1, UINT_MAX,
+ 1))
+ continue;
+
+ table->field[0]->store(table_stats->table, schema_length,
+ system_charset_info);
+ table->field[1]->store(table_stats->table + schema_length+1,
+ table_name_length, system_charset_info);
+ table->field[2]->store((longlong)table_stats->rows_read, TRUE);
+ table->field[3]->store((longlong)table_stats->rows_changed, TRUE);
+ table->field[4]->store((longlong)table_stats->rows_changed_x_indexes,
+ TRUE);
+ if (schema_table_store_record(thd, table))
+ {
+ VOID(pthread_mutex_unlock(&LOCK_global_table_stats));
+ DBUG_RETURN(1);
+ }
+ }
+ pthread_mutex_unlock(&LOCK_global_table_stats);
+ DBUG_RETURN(0);
+}
+
+
+/* Fill information schema table with index statistics */
+
+int fill_schema_index_stats(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ TABLE *table= tables->table;
+ DBUG_ENTER("fill_schema_index_stats");
+
+ pthread_mutex_lock(&LOCK_global_index_stats);
+ for (uint i= 0; i < global_index_stats.records; i++)
+ {
+ INDEX_STATS *index_stats =
+ (INDEX_STATS*) hash_element(&global_index_stats, i);
+ TABLE_LIST tmp_table;
+ char *index_name;
+ size_t schema_name_length, table_name_length, index_name_length;
+
+ bzero((char*) &tmp_table,sizeof(tmp_table));
+ tmp_table.db= index_stats->index;
+ tmp_table.table_name= strend(index_stats->index)+1;
+ tmp_table.grant.privilege= 0;
+ if (check_access(thd, SELECT_ACL | EXTRA_ACL, tmp_table.db,
+ &tmp_table.grant.privilege, 0, 0,
+ is_schema_db(tmp_table.db)) ||
+ check_grant(thd, SELECT_ACL, &tmp_table, 1, UINT_MAX, 1))
+ continue;
+
+ index_name= strend(tmp_table.table_name)+1;
+ schema_name_length= (tmp_table.table_name - index_stats->index) -1;
+ table_name_length= (index_name - tmp_table.table_name)-1;
+ index_name_length= (index_stats->index_name_length - schema_name_length -
+ table_name_length - 3);
+
+ table->field[0]->store(tmp_table.db, schema_name_length,
+ system_charset_info);
+ table->field[1]->store(tmp_table.table_name, table_name_length,
+ system_charset_info);
+ table->field[2]->store(index_name, index_name_length, system_charset_info);
+ table->field[3]->store((longlong)index_stats->rows_read, TRUE);
+
+ if (schema_table_store_record(thd, table))
+ {
+ VOID(pthread_mutex_unlock(&LOCK_global_index_stats));
+ DBUG_RETURN(1);
+ }
+ }
+ pthread_mutex_unlock(&LOCK_global_index_stats);
+ DBUG_RETURN(0);
+}
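The INDEX_STATS key is a single buffer holding the schema, table and index names separated by NUL bytes; the length arithmetic above just walks those separators. The following is a standalone sketch of splitting such a packed key, illustrative only and not server code.

// Standalone sketch (not server code): split a packed "db\0table\0index" key
// into its parts, the way fill_schema_index_stats() recovers the schema, table
// and index name lengths from the INDEX_STATS key buffer.
#include <cstring>
#include <iostream>

int main()
{
  // Three NUL-terminated names stored back to back in one buffer.
  const char key[]= "test\0t1\0PRIMARY";
  const char *db=    key;
  const char *table= db + std::strlen(db) + 1;
  const char *index= table + std::strlen(table) + 1;
  std::cout << db << "." << table << "." << index << "\n";  // test.t1.PRIMARY
  return 0;
}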
+
/* collect status for all running threads */
@@ -4014,6 +4341,8 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
field->unireg_check != Field::TIMESTAMP_DN_FIELD)
table->field[16]->store(STRING_WITH_LEN("on update CURRENT_TIMESTAMP"),
cs);
+ if (field->vcol_info)
+ table->field[16]->store(STRING_WITH_LEN("VIRTUAL"), cs);
table->field[18]->store(field->comment.str, field->comment.length, cs);
if (schema_table_store_record(thd, table))
@@ -4312,7 +4641,7 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond)
DBUG_RETURN(1);
}
proc_table->file->ha_index_init(0, 1);
- if ((res= proc_table->file->index_first(proc_table->record[0])))
+ if ((res= proc_table->file->ha_index_first(proc_table->record[0])))
{
res= (res == HA_ERR_END_OF_FILE) ? 0 : 1;
goto err;
@@ -4322,7 +4651,7 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond)
res= 1;
goto err;
}
- while (!proc_table->file->index_next(proc_table->record[0]))
+ while (!proc_table->file->ha_index_next(proc_table->record[0]))
{
if (store_schema_proc(thd, table, proc_table, wild, full_access, definer))
{
@@ -5568,6 +5897,81 @@ struct schema_table_ref
ST_SCHEMA_TABLE *schema_table;
};
+ST_FIELD_INFO user_stats_fields_info[]=
+{
+ {"USER", USERNAME_LENGTH, MYSQL_TYPE_STRING, 0, 0, "User", SKIP_OPEN_TABLE},
+ {"TOTAL_CONNECTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Total_connections",SKIP_OPEN_TABLE},
+ {"CONCURRENT_CONNECTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Concurrent_connections",SKIP_OPEN_TABLE},
+ {"CONNECTED_TIME", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Connected_time",SKIP_OPEN_TABLE},
+ {"BUSY_TIME", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_DOUBLE, 0, 0, "Busy_time",SKIP_OPEN_TABLE},
+ {"CPU_TIME", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_DOUBLE, 0, 0, "Cpu_time",SKIP_OPEN_TABLE},
+ {"BYTES_RECEIVED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Bytes_received",SKIP_OPEN_TABLE},
+ {"BYTES_SENT", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Bytes_sent",SKIP_OPEN_TABLE},
+ {"BINLOG_BYTES_WRITTEN", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Binlog_bytes_written",SKIP_OPEN_TABLE},
+ {"ROWS_READ", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_read",SKIP_OPEN_TABLE},
+ {"ROWS_SENT", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_sent",SKIP_OPEN_TABLE},
+ {"ROWS_DELETED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_deleted",SKIP_OPEN_TABLE},
+ {"ROWS_INSERTED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_inserted",SKIP_OPEN_TABLE},
+ {"ROWS_UPDATED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_updated",SKIP_OPEN_TABLE},
+ {"SELECT_COMMANDS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Select_commands",SKIP_OPEN_TABLE},
+ {"UPDATE_COMMANDS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Update_commands",SKIP_OPEN_TABLE},
+ {"OTHER_COMMANDS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Other_commands",SKIP_OPEN_TABLE},
+ {"COMMIT_TRANSACTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Commit_transactions",SKIP_OPEN_TABLE},
+ {"ROLLBACK_TRANSACTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rollback_transactions",SKIP_OPEN_TABLE},
+ {"DENIED_CONNECTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Denied_connections",SKIP_OPEN_TABLE},
+ {"LOST_CONNECTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Lost_connections",SKIP_OPEN_TABLE},
+ {"ACCESS_DENIED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Access_denied",SKIP_OPEN_TABLE},
+ {"EMPTY_QUERIES", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Empty_queries",SKIP_OPEN_TABLE},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, 0}
+};
+
+ST_FIELD_INFO client_stats_fields_info[]=
+{
+ {"CLIENT", LIST_PROCESS_HOST_LEN, MYSQL_TYPE_STRING, 0, 0, "Client",SKIP_OPEN_TABLE},
+ {"TOTAL_CONNECTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Total_connections",SKIP_OPEN_TABLE},
+ {"CONCURRENT_CONNECTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Concurrent_connections",SKIP_OPEN_TABLE},
+ {"CONNECTED_TIME", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Connected_time",SKIP_OPEN_TABLE},
+ {"BUSY_TIME", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_DOUBLE, 0, 0, "Busy_time",SKIP_OPEN_TABLE},
+ {"CPU_TIME", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_DOUBLE, 0, 0, "Cpu_time",SKIP_OPEN_TABLE},
+ {"BYTES_RECEIVED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Bytes_received",SKIP_OPEN_TABLE},
+ {"BYTES_SENT", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Bytes_sent",SKIP_OPEN_TABLE},
+ {"BINLOG_BYTES_WRITTEN", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Binlog_bytes_written",SKIP_OPEN_TABLE},
+ {"ROWS_READ", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_read",SKIP_OPEN_TABLE},
+ {"ROWS_SENT", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_sent",SKIP_OPEN_TABLE},
+ {"ROWS_DELETED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_deleted",SKIP_OPEN_TABLE},
+ {"ROWS_INSERTED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_inserted",SKIP_OPEN_TABLE},
+ {"ROWS_UPDATED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_updated",SKIP_OPEN_TABLE},
+ {"SELECT_COMMANDS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Select_commands",SKIP_OPEN_TABLE},
+ {"UPDATE_COMMANDS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Update_commands",SKIP_OPEN_TABLE},
+ {"OTHER_COMMANDS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Other_commands",SKIP_OPEN_TABLE},
+ {"COMMIT_TRANSACTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Commit_transactions",SKIP_OPEN_TABLE},
+ {"ROLLBACK_TRANSACTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rollback_transactions",SKIP_OPEN_TABLE},
+ {"DENIED_CONNECTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Denied_connections",SKIP_OPEN_TABLE},
+ {"LOST_CONNECTIONS", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Lost_connections",SKIP_OPEN_TABLE},
+ {"ACCESS_DENIED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Access_denied",SKIP_OPEN_TABLE},
+ {"EMPTY_QUERIES", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Empty_queries",SKIP_OPEN_TABLE},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, 0}
+};
+
+
+ST_FIELD_INFO table_stats_fields_info[]=
+{
+ {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table_schema",SKIP_OPEN_TABLE},
+ {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table_name",SKIP_OPEN_TABLE},
+ {"ROWS_READ", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_read",SKIP_OPEN_TABLE},
+ {"ROWS_CHANGED", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_changed",SKIP_OPEN_TABLE},
+ {"ROWS_CHANGED_X_INDEXES", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_changed_x_#indexes",SKIP_OPEN_TABLE},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0, 0}
+};
+
+ST_FIELD_INFO index_stats_fields_info[]=
+{
+ {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table_schema",SKIP_OPEN_TABLE},
+ {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table_name",SKIP_OPEN_TABLE},
+ {"INDEX_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Index_name",SKIP_OPEN_TABLE},
+ {"ROWS_READ", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Rows_read",SKIP_OPEN_TABLE},
+ {0, 0, MYSQL_TYPE_STRING, 0, 0, 0,0}
+};
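
Editor's note: the four ST_FIELD_INFO arrays above define the column layout of the new USER_STATISTICS, CLIENT_STATISTICS, TABLE_STATISTICS and INDEX_STATISTICS schema tables registered in schema_tables[] below. A usage sketch, assuming the server actually collects these counters (the variable that enables collection is not part of this hunk):

    SELECT user, total_connections, busy_time, rows_read, rows_sent
      FROM INFORMATION_SCHEMA.USER_STATISTICS;

    SELECT table_schema, table_name, rows_read, rows_changed
      FROM INFORMATION_SCHEMA.TABLE_STATISTICS
     ORDER BY rows_changed DESC
     LIMIT 10;
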
/*
  Find schema_tables element by name
@@ -6795,6 +7199,8 @@ ST_SCHEMA_TABLE schema_tables[]=
{
{"CHARACTER_SETS", charsets_fields_info, create_schema_table,
fill_schema_charsets, make_character_sets_old_format, 0, -1, -1, 0, 0},
+ {"CLIENT_STATISTICS", client_stats_fields_info, create_schema_table,
+ fill_schema_client_stats, make_old_format, 0, -1, -1, 0, 0},
{"COLLATIONS", collation_fields_info, create_schema_table,
fill_schema_collation, make_old_format, 0, -1, -1, 0, 0},
{"COLLATION_CHARACTER_SET_APPLICABILITY", coll_charset_app_fields_info,
@@ -6819,6 +7225,8 @@ ST_SCHEMA_TABLE schema_tables[]=
fill_status, make_old_format, 0, 0, -1, 0, 0},
{"GLOBAL_VARIABLES", variables_fields_info, create_schema_table,
fill_variables, make_old_format, 0, 0, -1, 0, 0},
+ {"INDEX_STATISTICS", index_stats_fields_info, create_schema_table,
+ fill_schema_index_stats, make_old_format, 0, -1, -1, 0, 0},
{"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table,
get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0,
OPEN_TABLE_ONLY},
@@ -6860,11 +7268,15 @@ ST_SCHEMA_TABLE schema_tables[]=
get_all_tables, make_table_names_old_format, 0, 1, 2, 1, 0},
{"TABLE_PRIVILEGES", table_privileges_fields_info, create_schema_table,
fill_schema_table_privileges, 0, 0, -1, -1, 0, 0},
+ {"TABLE_STATISTICS", table_stats_fields_info, create_schema_table,
+ fill_schema_table_stats, make_old_format, 0, -1, -1, 0, 0},
{"TRIGGERS", triggers_fields_info, create_schema_table,
get_all_tables, make_old_format, get_schema_triggers_record, 5, 6, 0,
OPEN_TABLE_ONLY},
{"USER_PRIVILEGES", user_privileges_fields_info, create_schema_table,
fill_schema_user_privileges, 0, 0, -1, -1, 0, 0},
+ {"USER_STATISTICS", user_stats_fields_info, create_schema_table,
+ fill_schema_user_stats, make_old_format, 0, -1, -1, 0, 0},
{"VARIABLES", variables_fields_info, create_schema_table, fill_variables,
make_old_format, 0, 0, -1, 1, 0},
{"VIEWS", view_fields_info, create_schema_table,
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index bdecabd782b..db3d07a28a4 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -26,9 +26,11 @@
#ifdef HAVE_FCONVERT
#include <floatingpoint.h>
#endif
-
#include "sql_string.h"
+#ifdef MYSQL_CLIENT
+#error Attempt to use server-side sql_string on client. Use client/sql_string.cc
+#endif
/*****************************************************************************
** String functions
*****************************************************************************/
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 9e22f7b6a27..a9df0dc2620 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -1,3 +1,5 @@
+#ifndef MYSQL_SQL_STRING_H_INCLUDED
+#define MYSQL_SQL_STRING_H_INCLUDED
/* Copyright (C) 2000 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -23,6 +25,10 @@
#define NOT_FIXED_DEC 31
#endif
+#ifdef MYSQL_CLIENT
+#error Attempt to use server-side sql_string on client. Use client/sql_string.h
+#endif
+
class String;
int sortcmp(const String *a,const String *b, CHARSET_INFO *cs);
String *copy_if_not_alloced(String *a,String *b,uint32 arg_length);
@@ -394,3 +400,5 @@ static inline bool check_if_only_end_space(CHARSET_INFO *cs, char *str,
{
return str+ cs->cset->scan(cs, str, end, MY_SEQ_SPACES) == end;
}
+
+#endif // MYSQL_SQL_STRING_H_INCLUDED
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 9cd16e51119..6e938583965 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2489,7 +2489,12 @@ int prepare_create_field(Create_field *sql_field,
(sql_field->decimals << FIELDFLAG_DEC_SHIFT));
break;
}
- if (!(sql_field->flags & NOT_NULL_FLAG))
+ if (sql_field->flags & NOT_NULL_FLAG)
+ DBUG_PRINT("info", ("1"));
+ if (sql_field->vcol_info)
+ DBUG_PRINT("info", ("2"));
+ if (!(sql_field->flags & NOT_NULL_FLAG) ||
+ (sql_field->vcol_info)) /* Make virtual columns allow NULL values */
sql_field->pack_flag|= FIELDFLAG_MAYBE_NULL;
if (sql_field->flags & NO_DEFAULT_VALUE_FLAG)
sql_field->pack_flag|= FIELDFLAG_NO_DEFAULT;
@@ -2803,6 +2808,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
null_fields--;
sql_field->flags= dup_field->flags;
sql_field->interval= dup_field->interval;
+ sql_field->vcol_info= dup_field->vcol_info;
+ sql_field->stored_in_db= dup_field->stored_in_db;
it2.remove(); // Remove first (create) definition
select_field_pos--;
break;
@@ -2835,7 +2842,23 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
sql_field->offset= record_offset;
if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER)
auto_increment++;
- record_offset+= sql_field->pack_length;
+ /*
+ For now skip fields that are not physically stored in the database
+ (virtual fields) and update their offset later
+ (see the next loop).
+ */
+ if (sql_field->stored_in_db)
+ record_offset+= sql_field->pack_length;
+ }
+  /* Update virtual fields' offsets */
+ it.rewind();
+ while ((sql_field=it++))
+ {
+ if (!sql_field->stored_in_db)
+ {
+ sql_field->offset= record_offset;
+ record_offset+= sql_field->pack_length;
+ }
}
if (timestamps_with_niladic > 1)
{
@@ -2885,6 +2908,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (key->type == Key::FOREIGN_KEY)
{
fk_key_count++;
+ if (((Foreign_key *)key)->validate(alter_info->create_list))
+ DBUG_RETURN(TRUE);
Foreign_key *fk_key= (Foreign_key*) key;
if (fk_key->ref_columns.elements &&
fk_key->ref_columns.elements != fk_key->columns.elements)
@@ -3171,6 +3196,17 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
}
#endif
+ if (!sql_field->stored_in_db)
+ {
+ /* Key fields must always be physically stored. */
+ my_error(ER_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (key->type == Key::PRIMARY && sql_field->vcol_info)
+ {
+ my_error(ER_PRIMARY_KEY_BASED_ON_VIRTUAL_COLUMN, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
if (!(sql_field->flags & NOT_NULL_FLAG))
{
if (key->type == Key::PRIMARY)
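
Editor's note: the two checks added above reject keys over computed columns that are not physically stored, and primary keys over any computed column. A sketch of statements these checks would reject or let through (table names are illustrative):

    -- rejected: ER_KEY_BASED_ON_GENERATED_VIRTUAL_COLUMN
    CREATE TABLE t1 (a INT, b INT AS (a + 1) VIRTUAL, KEY (b));
    -- passes these checks: the computed column is PERSISTENT (stored)
    CREATE TABLE t2 (a INT, b INT AS (a + 1) PERSISTENT, KEY (b));
    -- rejected: ER_PRIMARY_KEY_BASED_ON_VIRTUAL_COLUMN
    CREATE TABLE t3 (a INT, b INT AS (a + 1) PERSISTENT, PRIMARY KEY (b));
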
@@ -5728,6 +5764,19 @@ compare_tables(TABLE *table,
DBUG_RETURN(0);
}
+ /*
+ Check if the altered column is computed and either
+ is stored or is used in the partitioning expression.
+ TODO: Mark such a column with an alter flag only if
+ the defining expression has changed.
+ */
+ if (field->vcol_info &&
+ (field->stored_in_db || field->vcol_info->is_in_partitioning_expr()))
+ {
+ *need_copy_table= ALTER_TABLE_DATA_CHANGED;
+ DBUG_RETURN(0);
+ }
+
/* Don't pack rows in old tables if the user has requested this. */
if (create_info->row_type == ROW_TYPE_DYNAMIC ||
(tmp_new_field->flags & BLOB_FLAG) ||
@@ -6092,6 +6141,13 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (def)
{ // Field is changed
def->field=field;
+ if (field->stored_in_db != def->stored_in_db)
+ {
+ my_error(ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN,
+ MYF(0),
+ "Changing the STORED status");
+ goto err;
+ }
if (!def->after)
{
new_create_list.push_back(def);
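
Editor's note: the check added above refuses an ALTER that would flip a computed column between VIRTUAL and PERSISTENT, since that changes whether the value is materialized in the row. A hedged sketch (statement form is illustrative):

    CREATE TABLE t1 (a INT, b INT AS (a + 1) PERSISTENT);
    -- rejected with ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN
    -- ("Changing the STORED status"):
    ALTER TABLE t1 MODIFY b INT AS (a + 1) VIRTUAL;
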
@@ -6304,6 +6360,9 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
Key *key;
while ((key=key_it++)) // Add new keys
{
+ if (key->type == Key::FOREIGN_KEY &&
+ ((Foreign_key *)key)->validate(new_create_list))
+ goto err;
if (key->type != Key::FOREIGN_KEY)
new_key_list.push_back(key);
if (key->name &&
@@ -7699,6 +7758,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
/* Tell handler that we have values for all columns in the to table */
to->use_all_columns();
+ to->mark_virtual_columns_for_write();
init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE);
errpos= 4;
if (ignore)
@@ -7713,6 +7773,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
error= 1;
break;
}
+ update_virtual_fields(from);
thd->row_count++;
/* Return error if source table isn't empty. */
if (error_if_not_empty)
@@ -7733,6 +7794,12 @@ copy_data_between_tables(TABLE *from,TABLE *to,
copy_ptr->do_copy(copy_ptr);
}
prev_insert_id= to->file->next_insert_id;
+ update_virtual_fields(to, TRUE);
+ if (thd->is_error())
+ {
+ error= 1;
+ break;
+ }
error=to->file->ha_write_row(to->record[0]);
to->auto_increment_field_not_null= FALSE;
if (error)
@@ -7914,7 +7981,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
goto err;
}
ha_checksum row_crc= 0;
- int error= t->file->rnd_next(t->record[0]);
+ int error= t->file->ha_rnd_next(t->record[0]);
if (unlikely(error))
{
if (error == HA_ERR_RECORD_DELETED)
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index c6b41b59a3f..b2008baf644 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -567,10 +567,10 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name)
goto err;
table->use_all_columns();
table->field[0]->store(exact_name_str, exact_name_len, &my_charset_bin);
- if (!table->file->index_read_idx_map(table->record[0], 0,
- (uchar*) table->field[0]->ptr,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ if (!table->file->ha_index_read_idx_map(table->record[0], 0,
+ (uchar*) table->field[0]->ptr,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{
int error;
if ((error = table->file->ha_delete_row(table->record[0])))
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 00769d2abda..a983e64c01d 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -142,7 +142,7 @@ static void prepare_record_for_error_message(int error, TABLE *table)
/* Tell the engine about the new set. */
table->file->column_bitmaps_signal();
/* Read record that is identified by table->file->ref. */
- (void) table->file->rnd_pos(table->record[1], table->file->ref);
+ (void) table->file->ha_rnd_pos(table->record[1], table->file->ref);
/* Copy the newly read columns into the new record. */
for (field_p= table->field; (field= *field_p); field_p++)
if (bitmap_is_set(&unique_map, field->field_index))
@@ -1934,7 +1934,7 @@ int multi_update::do_updates()
{
if (thd->killed && trans_safe)
goto err;
- if ((local_error=tmp_table->file->rnd_next(tmp_table->record[0])))
+ if ((local_error= tmp_table->file->ha_rnd_next(tmp_table->record[0])))
{
if (local_error == HA_ERR_END_OF_FILE)
break;
@@ -1949,12 +1949,12 @@ int multi_update::do_updates()
uint field_num= 0;
do
{
- if((local_error=
- tbl->file->rnd_pos(tbl->record[0],
- (uchar *) tmp_table->field[field_num]->ptr)))
+ if ((local_error=
+ tbl->file->ha_rnd_pos(tbl->record[0],
+ (uchar*) tmp_table->field[field_num]->ptr)))
goto err;
field_num++;
- } while((tbl= check_opt_it++));
+ } while ((tbl= check_opt_it++));
table->status|= STATUS_UPDATED;
store_record(table,record[1]);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index c9a08200479..976d130b26b 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -549,6 +549,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token ALGORITHM_SYM
%token ALL /* SQL-2003-R */
%token ALTER /* SQL-2003-R */
+%token ALWAYS_SYM
%token ANALYZE_SYM
%token AND_AND_SYM /* OPERATOR */
%token AND_SYM /* SQL-2003-R */
@@ -598,6 +599,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token CHECK_SYM /* SQL-2003-R */
%token CIPHER_SYM
%token CLIENT_SYM
+%token CLIENT_STATS_SYM
%token CLOSE_SYM /* SQL-2003-R */
%token COALESCE /* SQL-2003-N */
%token CODE_SYM
@@ -715,6 +717,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token FULLTEXT_SYM
%token FUNCTION_SYM /* SQL-2003-R */
%token GE
+%token GENERATED_SYM
%token GEOMETRYCOLLECTION
%token GEOMETRY_SYM
%token GET_FORMAT /* MYSQL-FUNC */
@@ -744,6 +747,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token IMPORT
%token INDEXES
%token INDEX_SYM
+%token INDEX_STATS_SYM
%token INFILE
%token INITIAL_SIZE_SYM
%token INNER_SYM /* SQL-2003-R */
@@ -892,11 +896,13 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token PAGE_CHECKSUM_SYM
%token PARAM_MARKER
%token PARSER_SYM
+%token PARSE_VCOL_EXPR_SYM
%token PARTIAL /* SQL-2003-N */
%token PARTITIONING_SYM
%token PARTITIONS_SYM
%token PARTITION_SYM /* SQL-2003-R */
%token PASSWORD
+%token PERSISTENT_SYM
%token PHASE_SYM
%token PLUGINS_SYM
%token PLUGIN_SYM
@@ -985,6 +991,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token SIGNED_SYM
%token SIMPLE_SYM /* SQL-2003-N */
%token SLAVE
+%token SLOW_SYM
%token SMALLINT /* SQL-2003-R */
%token SNAPSHOT_SYM
%token SOCKET_SYM
@@ -1029,6 +1036,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token TABLES
%token TABLESPACE
%token TABLE_REF_PRIORITY
+%token TABLE_STATS_SYM
%token TABLE_SYM /* SQL-2003-R */
%token TABLE_CHECKSUM_SYM
%token TEMPORARY /* SQL-2003-N */
@@ -1076,6 +1084,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token UPGRADE_SYM
%token USAGE /* SQL-2003-N */
%token USER /* SQL-2003-R */
+%token USER_STATS_SYM
%token USE_FRM
%token USE_SYM
%token USING /* SQL-2003-R */
@@ -1090,6 +1099,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token VARIANCE_SYM
%token VARYING /* SQL-2003-R */
%token VAR_SAMP_SYM
+%token VIRTUAL_SYM
%token VIEW_SYM /* SQL-2003-N */
%token WAIT_SYM
%token WARNINGS
@@ -1149,7 +1159,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
text_string opt_gconcat_separator
%type <num>
- type int_type real_type order_dir lock_option
+ type int_type real_type order_dir lock_option field_def
udf_type if_exists opt_local opt_table_options table_options
table_option opt_if_not_exists opt_no_write_to_binlog
delete_option opt_temporary all_or_any opt_distinct
@@ -1305,6 +1315,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
init_key_options key_options key_opts key_opt key_using_alg
server_def server_options_list server_option
definer_opt no_definer definer
+ parse_vcol_expr vcol_opt_specifier vcol_opt_attribute
+ vcol_opt_attribute_list vcol_attribute
END_OF_INPUT
%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
@@ -1436,6 +1448,7 @@ statement:
| lock
| optimize
| keycache
+ | parse_vcol_expr
| partition_entry
| preload
| prepare
@@ -4766,8 +4779,9 @@ field_spec:
lex->default_value= lex->on_update_value= 0;
lex->comment=null_lex_str;
lex->charset=NULL;
+ lex->vcol_info= 0;
}
- type opt_attribute
+ field_def
{
LEX *lex=Lex;
if (add_field_to_list(lex->thd, &$1, (enum enum_field_types) $3,
@@ -4775,11 +4789,101 @@ field_spec:
lex->default_value, lex->on_update_value,
&lex->comment,
lex->change,&lex->interval_list,lex->charset,
- lex->uint_geom_type))
+ lex->uint_geom_type,
+ lex->vcol_info))
MYSQL_YYABORT;
}
;
+field_def:
+ type opt_attribute {}
+ | type opt_generated_always AS '(' virtual_column_func ')'
+ vcol_opt_specifier
+ vcol_opt_attribute
+ {
+ $$= (enum enum_field_types)MYSQL_TYPE_VIRTUAL;
+ Lex->vcol_info->set_field_type((enum enum_field_types) $1);
+ }
+ ;
+
+opt_generated_always:
+ /* empty */
+ | GENERATED_SYM ALWAYS_SYM {}
+ ;
+
+vcol_opt_specifier:
+ /* empty */
+ {
+ Lex->vcol_info->set_stored_in_db_flag(FALSE);
+ }
+ | VIRTUAL_SYM
+ {
+ Lex->vcol_info->set_stored_in_db_flag(FALSE);
+ }
+ | PERSISTENT_SYM
+ {
+ Lex->vcol_info->set_stored_in_db_flag(TRUE);
+ }
+ ;
+
+vcol_opt_attribute:
+ /* empty */ {}
+ | vcol_opt_attribute_list {}
+ ;
+
+vcol_opt_attribute_list:
+ vcol_opt_attribute_list vcol_attribute {}
+ | vcol_attribute
+ ;
+
+vcol_attribute:
+ UNIQUE_SYM
+ {
+ LEX *lex=Lex;
+ lex->type|= UNIQUE_FLAG;
+ lex->alter_info.flags|= ALTER_ADD_INDEX;
+ }
+ | UNIQUE_SYM KEY_SYM
+ {
+ LEX *lex=Lex;
+ lex->type|= UNIQUE_KEY_FLAG;
+ lex->alter_info.flags|= ALTER_ADD_INDEX;
+ }
+ | COMMENT_SYM TEXT_STRING_sys { Lex->comment= $2; }
+ ;
+
+parse_vcol_expr:
+ PARSE_VCOL_EXPR_SYM '(' virtual_column_func ')'
+ {
+ /*
+ "PARSE_VCOL_EXPR" can only be used by the SQL server
+ when reading a '*.frm' file.
+ Prevent the end user from invoking this command.
+ */
+ if (!Lex->parse_vcol_expr)
+ {
+ my_message(ER_SYNTAX_ERROR, ER(ER_SYNTAX_ERROR), MYF(0));
+ MYSQL_YYABORT;
+ }
+ }
+ ;
+
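
Editor's note: the parse_vcol_expr rule above exists only so the server can re-parse the expression text it stored in the .frm file (see unpack_vcol_info_from_frm in table.cc later in this patch). Lex->parse_vcol_expr is never set for client statements, so a client sending the statement gets a plain syntax error; illustrative only:

    -- rejected with ER_SYNTAX_ERROR when issued by an ordinary client:
    PARSE_VCOL_EXPR (a + 1);
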
+virtual_column_func:
+ remember_name expr remember_end
+ {
+ Lex->vcol_info= new Virtual_column_info();
+ if (!Lex->vcol_info)
+ {
+ mem_alloc_error(sizeof(Virtual_column_info));
+ MYSQL_YYABORT;
+ }
+ uint expr_len= (uint)($3 - $1) - 1;
+ Lex->vcol_info->expr_str.str= (char* ) sql_memdup($1 + 1, expr_len);
+ Lex->vcol_info->expr_str.length= expr_len;
+ Lex->vcol_info->expr_item= $2;
+ }
+ ;
+
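
Editor's note: taken together, field_def, opt_generated_always, vcol_opt_specifier and vcol_attribute accept column definitions like the sketch below (keyword spellings assume the obvious lexer mapping for the new tokens; an omitted specifier defaults to VIRTUAL, i.e. not stored):

    CREATE TABLE t1 (
      a INT,
      b INT AS (a + 1) VIRTUAL,                       -- computed on read, not stored
      c INT GENERATED ALWAYS AS (a * 2) PERSISTENT,   -- computed and stored in the row
      d INT AS (a - 1) COMMENT 'defaults to VIRTUAL'  -- vcol_attribute allows COMMENT/UNIQUE
    );
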
type:
int_type opt_field_length field_options { $$=$1; }
| real_type opt_precision field_options { $$=$1; }
@@ -5867,8 +5971,9 @@ alter_list_item:
lex->comment=null_lex_str;
lex->charset= NULL;
lex->alter_info.flags|= ALTER_CHANGE_COLUMN;
+ lex->vcol_info= 0;
}
- type opt_attribute
+ field_def
{
LEX *lex=Lex;
if (add_field_to_list(lex->thd,&$3,
@@ -5877,7 +5982,8 @@ alter_list_item:
lex->default_value, lex->on_update_value,
&lex->comment,
$3.str, &lex->interval_list, lex->charset,
- lex->uint_geom_type))
+ lex->uint_geom_type,
+ lex->vcol_info))
MYSQL_YYABORT;
}
opt_place
@@ -10138,6 +10244,34 @@ show_param:
{
Lex->sql_command = SQLCOM_SHOW_SLAVE_STAT;
}
+ | CLIENT_STATS_SYM
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_CLIENT_STATS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_CLIENT_STATS))
+ MYSQL_YYABORT;
+ }
+ | USER_STATS_SYM
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_USER_STATS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_USER_STATS))
+ MYSQL_YYABORT;
+ }
+ | TABLE_STATS_SYM
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_TABLE_STATS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLE_STATS))
+ MYSQL_YYABORT;
+ }
+ | INDEX_STATS_SYM
+ {
+ LEX *lex= Lex;
+ lex->sql_command= SQLCOM_SHOW_INDEX_STATS;
+ if (prepare_schema_table(YYTHD, lex, 0, SCH_INDEX_STATS))
+ MYSQL_YYABORT;
+ }
| CREATE PROCEDURE sp_name
{
LEX *lex= Lex;
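
Editor's note: the four show_param alternatives above expose the new statistics tables through SHOW as well. Assuming the lexer maps the *_STATS_SYM tokens to the keywords used by the userstat-style patches, the corresponding statements are:

    SHOW USER_STATISTICS;
    SHOW CLIENT_STATISTICS;
    SHOW TABLE_STATISTICS;
    SHOW INDEX_STATISTICS;
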
@@ -10346,6 +10480,16 @@ flush_option:
{ Lex->type|= REFRESH_STATUS; }
| SLAVE
{ Lex->type|= REFRESH_SLAVE; }
+ | SLOW_SYM QUERY_SYM LOGS_SYM
+ { Lex->type |= REFRESH_SLOW_QUERY_LOG; }
+ | CLIENT_STATS_SYM
+ { Lex->type|= REFRESH_CLIENT_STATS; }
+ | USER_STATS_SYM
+ { Lex->type|= REFRESH_USER_STATS; }
+ | TABLE_STATS_SYM
+ { Lex->type|= REFRESH_TABLE_STATS; }
+ | INDEX_STATS_SYM
+ { Lex->type|= REFRESH_INDEX_STATS; }
| MASTER_SYM
{ Lex->type|= REFRESH_MASTER; }
| DES_KEY_FILE
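
Editor's note: the new flush_option alternatives let FLUSH rotate the slow query log and reset the in-memory statistics hashes. Assuming the same keyword spellings as above:

    FLUSH SLOW QUERY LOGS;
    FLUSH CLIENT_STATISTICS;
    FLUSH USER_STATISTICS;
    FLUSH TABLE_STATISTICS;
    FLUSH INDEX_STATISTICS;
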
@@ -11431,6 +11575,7 @@ keyword_sp:
| AGAINST {}
| AGGREGATE_SYM {}
| ALGORITHM_SYM {}
+ | ALWAYS_SYM {}
| ANY_SYM {}
| AT_SYM {}
| AUTHORS_SYM {}
@@ -11448,6 +11593,7 @@ keyword_sp:
| CHAIN_SYM {}
| CHANGED {}
| CIPHER_SYM {}
+ | CLIENT_STATS_SYM {}
| CLIENT_SYM {}
| COALESCE {}
| CODE_SYM {}
@@ -11500,6 +11646,7 @@ keyword_sp:
| FIRST_SYM {}
| FIXED_SYM {}
| FRAC_SECOND_SYM {}
+ | GENERATED_SYM {}
| GEOMETRY_SYM {}
| GEOMETRYCOLLECTION {}
| GET_FORMAT {}
@@ -11509,6 +11656,7 @@ keyword_sp:
| HOSTS_SYM {}
| HOUR_SYM {}
| IDENTIFIED_SYM {}
+ | INDEX_STATS_SYM {}
| INVOKER_SYM {}
| IMPORT {}
| INDEXES {}
@@ -11587,6 +11735,7 @@ keyword_sp:
| PARTITIONING_SYM {}
| PARTITIONS_SYM {}
| PASSWORD {}
+ | PERSISTENT_SYM {}
| PHASE_SYM {}
| PLUGIN_SYM {}
| PLUGINS_SYM {}
@@ -11632,6 +11781,7 @@ keyword_sp:
| SIMPLE_SYM {}
| SHARE_SYM {}
| SHUTDOWN {}
+ | SLOW_SYM {}
| SNAPSHOT_SYM {}
| SOUNDS_SYM {}
| SOURCE_SYM {}
@@ -11651,6 +11801,7 @@ keyword_sp:
| SUSPEND_SYM {}
| SWAPS_SYM {}
| SWITCHES_SYM {}
+ | TABLE_STATS_SYM {}
| TABLES {}
| TABLE_CHECKSUM_SYM {}
| TABLESPACE {}
@@ -11676,9 +11827,11 @@ keyword_sp:
| UNKNOWN_SYM {}
| UNTIL_SYM {}
| USER {}
+ | USER_STATS_SYM {}
| USE_FRM {}
| VARIABLES {}
| VIEW_SYM {}
+ | VIRTUAL_SYM {}
| VALUE_SYM {}
| WARNINGS {}
| WAIT_SYM {}
@@ -13339,6 +13492,7 @@ sf_tail:
lex->length= lex->dec= NULL;
lex->interval_list.empty();
lex->type= 0;
+ lex->vcol_info= 0;
}
type /* $11 */
{ /* $12 */
diff --git a/sql/structs.h b/sql/structs.h
index 2546d241059..0772cb4ff1d 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -76,6 +76,7 @@ typedef struct st_key {
uint extra_length;
uint usable_key_parts; /* Should normally be = key_parts */
uint block_size;
+ uint name_length;
enum ha_key_alg algorithm;
/*
Note that parser is used when the table is opened for use, and
@@ -88,6 +89,8 @@ typedef struct st_key {
};
KEY_PART_INFO *key_part;
char *name; /* Name of key */
+ /* Unique name for cache; db + \0 + table_name + \0 + key_name + \0 */
+ uchar *cache_name;
/*
Array of AVG(#records with the same field value) for 1st ... Nth key part.
0 means 'not known'.
@@ -237,6 +240,111 @@ typedef struct user_conn {
USER_RESOURCES user_resources;
} USER_CONN;
+typedef struct st_user_stats
+{
+ char user[max(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1];
+ // Account name the user is mapped to when this is a user from mapped_user.
+ // Otherwise, the same value as user.
+ char priv_user[max(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1];
+ uint user_name_length;
+ uint total_connections;
+ uint concurrent_connections;
+ time_t connected_time; // in seconds
+ double busy_time; // in seconds
+ double cpu_time; // in seconds
+ ulonglong bytes_received;
+ ulonglong bytes_sent;
+ ulonglong binlog_bytes_written;
+ ha_rows rows_read, rows_sent;
+ ha_rows rows_updated, rows_deleted, rows_inserted;
+ ulonglong select_commands, update_commands, other_commands;
+ ulonglong commit_trans, rollback_trans;
+ ulonglong denied_connections, lost_connections;
+ ulonglong access_denied_errors;
+ ulonglong empty_queries;
+} USER_STATS;
+
+/* Lookup function for hash tables with USER_STATS entries */
+extern "C" uchar *get_key_user_stats(USER_STATS *user_stats, size_t *length,
+ my_bool not_used __attribute__((unused)));
+
+/* Free all memory for a hash table with USER_STATS entries */
+extern void free_user_stats(USER_STATS* user_stats);
+
+/* Initialize an instance of USER_STATS */
+extern void
+init_user_stats(USER_STATS *user_stats,
+ const char *user,
+ size_t user_length,
+ const char *priv_user,
+ uint total_connections,
+ uint concurrent_connections,
+ time_t connected_time,
+ double busy_time,
+ double cpu_time,
+ ulonglong bytes_received,
+ ulonglong bytes_sent,
+ ulonglong binlog_bytes_written,
+ ha_rows rows_sent,
+ ha_rows rows_read,
+ ha_rows rows_inserted,
+ ha_rows rows_deleted,
+ ha_rows rows_updated,
+ ulonglong select_commands,
+ ulonglong update_commands,
+ ulonglong other_commands,
+ ulonglong commit_trans,
+ ulonglong rollback_trans,
+ ulonglong denied_connections,
+ ulonglong lost_connections,
+ ulonglong access_denied_errors,
+ ulonglong empty_queries);
+
+/* Increment values of an instance of USER_STATS */
+extern void
+add_user_stats(USER_STATS *user_stats,
+ uint total_connections,
+ uint concurrent_connections,
+ time_t connected_time,
+ double busy_time,
+ double cpu_time,
+ ulonglong bytes_received,
+ ulonglong bytes_sent,
+ ulonglong binlog_bytes_written,
+ ha_rows rows_sent,
+ ha_rows rows_read,
+ ha_rows rows_inserted,
+ ha_rows rows_deleted,
+ ha_rows rows_updated,
+ ulonglong select_commands,
+ ulonglong update_commands,
+ ulonglong other_commands,
+ ulonglong commit_trans,
+ ulonglong rollback_trans,
+ ulonglong denied_connections,
+ ulonglong lost_connections,
+ ulonglong access_denied_errors,
+ ulonglong empty_queries);
+
+typedef struct st_table_stats
+{
+ char table[NAME_LEN * 2 + 2]; // [db] + '\0' + [table] + '\0'
+ uint table_name_length;
+ ulonglong rows_read, rows_changed;
+ ulonglong rows_changed_x_indexes;
+ /* Stores enum db_type, but forward declarations cannot be done */
+ int engine_type;
+} TABLE_STATS;
+
+typedef struct st_index_stats
+{
+ // [db] + '\0' + [table] + '\0' + [index] + '\0'
+ char index[NAME_LEN * 3 + 3];
+ uint index_name_length; /* Length of 'index' */
+ ulonglong rows_read;
+} INDEX_STATS;
+
+
/* Bits in form->update */
#define REG_MAKE_DUPP 1 /* Make a copy of record when read */
#define REG_NEW_RECORD 2 /* Write a new record if not found */
diff --git a/sql/table.cc b/sql/table.cc
index 2e9ec7c9701..0f3f388a199 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -33,6 +33,12 @@ LEX_STRING GENERAL_LOG_NAME= {C_STRING_WITH_LEN("general_log")};
/* SLOW_LOG name */
LEX_STRING SLOW_LOG_NAME= {C_STRING_WITH_LEN("slow_log")};
+/*
+ Keyword added as a prefix when parsing the defining expression for a
+ virtual column read from the column definition saved in the frm file
+*/
+LEX_STRING parse_vcol_keyword= { C_STRING_WITH_LEN("PARSE_VCOL_EXPR ") };
+
/* Functions defined in this file */
void open_table_error(TABLE_SHARE *share, int error, int db_errno,
@@ -663,10 +669,11 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
uint interval_count, interval_parts, read_length, int_length;
uint db_create_options, keys, key_parts, n_length;
uint key_info_length, com_length, null_bit_pos;
+ uint vcol_screen_length;
uint extra_rec_buf_length;
uint i,j;
bool use_hash;
- char *keynames, *names, *comment_pos;
+ char *keynames, *names, *comment_pos, *vcol_screen_pos;
uchar *record;
uchar *disk_buff, *strpos, *null_flags, *null_pos;
ulong pos, record_offset, *rec_per_key, rec_buff_length;
@@ -836,6 +843,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
strpos+= (strmov(keynames, (char *) strpos) - keynames)+1;
share->reclength = uint2korr((head+16));
+ share->stored_rec_length= share->reclength;
if (*(head+26) == 1)
share->system= 1; /* one-record-database */
#ifdef HAVE_CRYPTED_FRM
@@ -1049,24 +1057,28 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
int_length= uint2korr(head+274);
share->null_fields= uint2korr(head+282);
com_length= uint2korr(head+284);
+ vcol_screen_length= uint2korr(head+286);
+ share->vfields= 0;
+ share->stored_fields= share->fields;
share->comment.length= (int) (head[46]);
share->comment.str= strmake_root(&share->mem_root, (char*) head+47,
share->comment.length);
- DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d", interval_count,interval_parts, share->keys,n_length,int_length, com_length));
-
+ DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d vcol_screen_length: %d", interval_count,interval_parts, share->keys,n_length,int_length, com_length, vcol_screen_length));
if (!(field_ptr = (Field **)
alloc_root(&share->mem_root,
(uint) ((share->fields+1)*sizeof(Field*)+
interval_count*sizeof(TYPELIB)+
(share->fields+interval_parts+
keys+3)*sizeof(char *)+
- (n_length+int_length+com_length)))))
+ (n_length+int_length+com_length+
+ vcol_screen_length)))))
goto err; /* purecov: inspected */
share->field= field_ptr;
read_length=(uint) (share->fields * field_pack_length +
- pos+ (uint) (n_length+int_length+com_length));
+ pos+ (uint) (n_length+int_length+com_length+
+ vcol_screen_length));
if (read_string(file,(uchar**) &disk_buff,read_length))
goto err; /* purecov: inspected */
#ifdef HAVE_CRYPTED_FRM
@@ -1087,7 +1099,11 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
memcpy((char*) names, strpos+(share->fields*field_pack_length),
(uint) (n_length+int_length));
comment_pos= names+(n_length+int_length);
- memcpy(comment_pos, disk_buff+read_length-com_length, com_length);
+ memcpy(comment_pos, disk_buff+read_length-com_length-vcol_screen_length,
+ com_length);
+ vcol_screen_pos= names+(n_length+int_length+com_length);
+ memcpy(vcol_screen_pos, disk_buff+read_length-vcol_screen_length,
+ vcol_screen_length);
fix_type_pointers(&interval_array, &share->fieldnames, 1, &names);
if (share->fieldnames.count != share->fields)
@@ -1155,10 +1171,14 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
for (i=0 ; i < share->fields; i++, strpos+=field_pack_length, field_ptr++)
{
uint pack_flag, interval_nr, unireg_type, recpos, field_length;
+ uint vcol_info_length=0;
+ uint vcol_expr_length=0;
enum_field_types field_type;
CHARSET_INFO *charset=NULL;
Field::geometry_type geom_type= Field::GEOM_GEOMETRY;
LEX_STRING comment;
+ Virtual_column_info *vcol_info= 0;
+ bool fld_stored_in_db= TRUE;
if (new_frm_ver >= 3)
{
@@ -1193,6 +1213,18 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
goto err;
}
}
+
+ if ((uchar)field_type == (uchar)MYSQL_TYPE_VIRTUAL)
+ {
+ DBUG_ASSERT(interval_nr); // Expect non-null expression
+ /*
+ The interval_id byte in the .frm file stores the length of the
+ expression statement for a virtual column.
+ */
+ vcol_info_length= interval_nr;
+ interval_nr= 0;
+ }
+
if (!comment_length)
{
comment.str= (char*) "";
@@ -1204,6 +1236,34 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
comment.length= comment_length;
comment_pos+= comment_length;
}
+
+ if (vcol_info_length)
+ {
+ /*
+ Get virtual column data stored in the .frm file as follows:
+ byte 1 = 1 (always 1 to allow for future extensions)
+ byte 2 = sql_type
+ byte 3 = flags (as of now, 0 - no flags, 1 - field is physically stored)
+ byte 4-... = virtual column expression (text data)
+ */
+ vcol_info= new Virtual_column_info();
+ if ((uint)vcol_screen_pos[0] != 1)
+ {
+ error= 4;
+ goto err;
+ }
+ field_type= (enum_field_types) (uchar) vcol_screen_pos[1];
+ fld_stored_in_db= (bool) (uint) vcol_screen_pos[2];
+ vcol_expr_length= vcol_info_length-(uint)FRM_VCOL_HEADER_SIZE;
+ if (!(vcol_info->expr_str.str=
+ (char *)memdup_root(&share->mem_root,
+ vcol_screen_pos+(uint)FRM_VCOL_HEADER_SIZE,
+ vcol_expr_length)))
+ goto err;
+ vcol_info->expr_str.length= vcol_expr_length;
+ vcol_screen_pos+= vcol_info_length;
+ share->vfields++;
+ }
}
else
{
@@ -1294,6 +1354,8 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
reg_field->field_index= i;
reg_field->comment=comment;
+ reg_field->vcol_info= vcol_info;
+ reg_field->stored_in_db= fld_stored_in_db;
if (field_type == MYSQL_TYPE_BIT && !f_bit_as_char(pack_flag))
{
if ((null_bit_pos+= field_length & 7) > 7)
@@ -1318,8 +1380,17 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
if (use_hash)
(void) my_hash_insert(&share->name_hash,
(uchar*) field_ptr); // never fail
+ if (!reg_field->stored_in_db)
+ {
+ share->stored_fields--;
+ if (share->stored_rec_length>=recpos)
+ share->stored_rec_length= recpos-1;
+ }
}
*field_ptr=0; // End marker
+ /* Sanity checks: */
+ DBUG_ASSERT(share->fields>=share->stored_fields);
+ DBUG_ASSERT(share->reclength>=share->stored_rec_length);
/* Fix key->name and key_part->field */
if (key_parts)
@@ -1334,6 +1405,19 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
{
uint usable_parts= 0;
keyinfo->name=(char*) share->keynames.type_names[key];
+ keyinfo->name_length= strlen(keyinfo->name);
+ keyinfo->cache_name=
+ (uchar*) alloc_root(&share->mem_root,
+ share->table_cache_key.length+
+ keyinfo->name_length + 1);
+ if (keyinfo->cache_name) // If not out of memory
+ {
+ uchar *pos= keyinfo->cache_name;
+ memcpy(pos, share->table_cache_key.str, share->table_cache_key.length);
+ memcpy(pos + share->table_cache_key.length, keyinfo->name,
+ keyinfo->name_length+1);
+ }
+
/* Fix fulltext keys for old .frm files */
if (share->key_info[key].flags & HA_FULLTEXT)
share->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT;
@@ -1606,6 +1690,297 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
DBUG_RETURN(error);
} /* open_binary_frm */
+/*
+ @brief
+ Clear GET_FIXED_FIELDS_FLAG in all fields of a table
+
+ @param
+ table The table for whose fields the flags are to be cleared
+
+ @note
+ This routine is used for error handling purposes.
+
+ @return
+ none
+*/
+
+static void clear_field_flag(TABLE *table)
+{
+ Field **ptr;
+ DBUG_ENTER("clear_field_flag");
+
+ for (ptr= table->field; *ptr; ptr++)
+ (*ptr)->flags&= (~GET_FIXED_FIELDS_FLAG);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ @brief
+ Perform semantic analysis of the defining expression for a virtual column
+
+ @param
+ thd The thread object
+ @param
+ table The table containing the virtual column
+ @param
+ vcol_field The virtual field whose defining expression is to be analyzed
+
+ @details
+ The function performs semantic analysis of the defining expression for
+ the virtual column vcol_field. The expression is used to compute the
+ values of this column.
+
+ @note
+ The function exploits the fact that the fix_fields method sets the flag
+ GET_FIXED_FIELDS_FLAG for all fields in the item tree.
+ This flag must always be unset before returning from this function
+ since it is used for other purposes as well.
+
+ @retval
+    TRUE             An error occurred; the defining expression is not valid
+ @retval
+ FALSE Otherwise
+*/
+
+bool fix_vcol_expr(THD *thd,
+ TABLE *table,
+ Field *vcol_field)
+{
+ Virtual_column_info *vcol_info= vcol_field->vcol_info;
+ Item* func_expr= vcol_info->expr_item;
+ uint dir_length, home_dir_length;
+ bool result= TRUE;
+ TABLE_LIST tables;
+ TABLE_LIST *save_table_list, *save_first_table, *save_last_table;
+ int error;
+ Name_resolution_context *context;
+ const char *save_where;
+ char* db_name;
+ char db_name_string[FN_REFLEN];
+ bool save_use_only_table_context;
+ Field **ptr, *field;
+ enum_mark_columns save_mark_used_columns= thd->mark_used_columns;
+ DBUG_ASSERT(func_expr);
+ DBUG_ENTER("fix_vcol_expr");
+
+ /*
+ Set-up the TABLE_LIST object to be a list with a single table
+ Set the object to zero to create NULL pointers and set alias
+ and real name to table name and get database name from file name.
+ */
+
+ bzero((void*)&tables, sizeof(TABLE_LIST));
+ tables.alias= tables.table_name= (char*) table->s->table_name.str;
+ tables.table= table;
+ tables.next_local= 0;
+ tables.next_name_resolution_table= 0;
+ strmov(db_name_string, table->s->normalized_path.str);
+ dir_length= dirname_length(db_name_string);
+ db_name_string[dir_length - 1]= 0;
+ home_dir_length= dirname_length(db_name_string);
+ db_name= &db_name_string[home_dir_length];
+ tables.db= db_name;
+
+ thd->mark_used_columns= MARK_COLUMNS_NONE;
+
+ context= thd->lex->current_context();
+ table->map= 1; //To ensure correct calculation of const item
+ table->get_fields_in_item_tree= TRUE;
+ save_table_list= context->table_list;
+ save_first_table= context->first_name_resolution_table;
+ save_last_table= context->last_name_resolution_table;
+ context->table_list= &tables;
+ context->first_name_resolution_table= &tables;
+ context->last_name_resolution_table= NULL;
+ func_expr->walk(&Item::change_context_processor, 0, (uchar*) context);
+ save_where= thd->where;
+ thd->where= "virtual column function";
+
+ /* Save the context before fixing the fields*/
+ save_use_only_table_context= thd->lex->use_only_table_context;
+ thd->lex->use_only_table_context= TRUE;
+ /* Fix fields referenced to by the virtual column function */
+ error= func_expr->fix_fields(thd, (Item**)0);
+ /* Restore the original context*/
+ thd->lex->use_only_table_context= save_use_only_table_context;
+ context->table_list= save_table_list;
+ context->first_name_resolution_table= save_first_table;
+ context->last_name_resolution_table= save_last_table;
+
+ if (unlikely(error))
+ {
+ DBUG_PRINT("info",
+ ("Field in virtual column expression does not belong to the table"));
+ goto end;
+ }
+ thd->where= save_where;
+#ifdef PARANOID
+ /*
+ Walk through the Item tree checking if all items are valid
+ to be part of the virtual column
+ */
+ error= func_expr->walk(&Item::check_vcol_func_processor, 0, NULL);
+ if (error)
+ {
+    my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(0), vcol_field->field_name);
+ goto end;
+ }
+#endif
+ if (unlikely(func_expr->const_item()))
+ {
+ my_error(ER_CONST_EXPR_IN_VCOL, MYF(0));
+ goto end;
+ }
+ /* Ensure that this virtual column is not based on another virtual field. */
+ ptr= table->field;
+ while ((field= *(ptr++)))
+ {
+ if ((field->flags & GET_FIXED_FIELDS_FLAG) &&
+ (field->vcol_info))
+ {
+ my_error(ER_VCOL_BASED_ON_VCOL, MYF(0));
+ goto end;
+ }
+ }
+ result= FALSE;
+
+end:
+
+ /* Clear GET_FIXED_FIELDS_FLAG for the fields of the table */
+ clear_field_flag(table);
+
+ table->get_fields_in_item_tree= FALSE;
+ thd->mark_used_columns= save_mark_used_columns;
+ table->map= 0; //Restore old value
+
+ DBUG_RETURN(result);
+}
+
+/*
+ @brief
+ Unpack the definition of a virtual column from its linear representation
+
+  @param
+ thd The thread object
+ @param
+ table The table containing the virtual column
+ @param
+    field             The field for the virtual column
+ @param
+ vcol_expr The string representation of the defining expression
+ @param[out]
+ error_reported The flag to inform the caller that no other error
+ messages are to be generated
+
+ @details
+ The function takes string representation 'vcol_expr' of the defining
+ expression for the virtual field 'field' of the table 'table' and
+ parses it, building an item object for it. The pointer to this item is
+    placed into field->vcol_info.expr_item. After this the function performs
+    semantic analysis of the item by calling the function fix_vcol_expr.
+    Since the defining expression is part of the table definition, the item
+    for it is created in table->mem_root within a separate Query_arena.
+    The free_list of this arena is saved in field->vcol_info.item_free_list
+    to be freed when the table definition is removed from the TABLE_SHARE cache.
+
+ @note
+    Before passing 'vcol_expr' to the parser the function encloses it in
+    parentheses and prepends a special keyword.
+
+ @retval
+    FALSE          On success
+ @retval
+ TRUE Otherwise
+*/
+bool unpack_vcol_info_from_frm(THD *thd,
+ TABLE *table,
+ Field *field,
+ LEX_STRING *vcol_expr,
+ bool *error_reported)
+{
+ bool rc= FALSE;
+ DBUG_ENTER("unpack_vcol_info_from_frm");
+ DBUG_ASSERT(vcol_expr);
+
+ /*
+ Step 1: Construct the input string for the parser.
+ The string to be parsed has to be of the following format:
+ "PARSE_VCOL_EXPR (<expr_string_from_frm>)".
+ */
+ char *vcol_expr_str;
+ int str_len= 0;
+ CHARSET_INFO *old_character_set_client;
+
+ if (!(vcol_expr_str= (char*) alloc_root(&table->mem_root,
+ vcol_expr->length +
+ parse_vcol_keyword.length + 3)))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ memcpy(vcol_expr_str,
+ (char*) parse_vcol_keyword.str,
+ parse_vcol_keyword.length);
+ str_len= parse_vcol_keyword.length;
+ memcpy(vcol_expr_str + str_len, "(", 1);
+ str_len++;
+ memcpy(vcol_expr_str + str_len,
+ (char*) vcol_expr->str,
+ vcol_expr->length);
+ str_len+= vcol_expr->length;
+ memcpy(vcol_expr_str + str_len, ")", 1);
+ str_len++;
+ memcpy(vcol_expr_str + str_len, "\0", 1);
+ str_len++;
+ Parser_state parser_state(thd, vcol_expr_str, str_len);
+
+ /*
+ Step 2: Setup thd for parsing.
+ */
+ Query_arena *backup_stmt_arena_ptr= thd->stmt_arena;
+ Query_arena backup_arena;
+ Query_arena vcol_arena(&table->mem_root, Query_arena::INITIALIZED);
+ thd->set_n_backup_active_arena(&vcol_arena, &backup_arena);
+ thd->stmt_arena= &vcol_arena;
+
+ thd->lex->parse_vcol_expr= TRUE;
+ old_character_set_client= thd->variables.character_set_client;
+
+ /*
+ Step 3: Use the parser to build an Item object from vcol_expr_str.
+ */
+ if (parse_sql(thd, &parser_state, NULL))
+ {
+ goto err;
+ }
+ /* From now on use vcol_info generated by the parser. */
+ field->vcol_info= thd->lex->vcol_info;
+
+ /* Validate the Item tree. */
+ if (fix_vcol_expr(thd, table, field))
+ {
+ *error_reported= TRUE;
+ field->vcol_info= 0;
+ goto err;
+ }
+ field->vcol_info->item_free_list= thd->free_list;
+ goto end;
+
+err:
+ rc= TRUE;
+ thd->lex->parse_vcol_expr= FALSE;
+ thd->free_items();
+end:
+ thd->stmt_arena= backup_stmt_arena_ptr;
+ thd->restore_active_arena(&vcol_arena, &backup_arena);
+ thd->variables.character_set_client= old_character_set_client;
+
+ DBUG_RETURN(rc);
+}
+
+/*
+ Read data from a binary .frm file from MySQL 3.23 - 5.0 into TABLE_SHARE
+*/
/*
Open a table based on a TABLE_SHARE
@@ -1640,7 +2015,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
uint records, i, bitmap_size;
bool error_reported= FALSE;
uchar *record, *bitmaps;
- Field **field_ptr;
+ Field **field_ptr, **vfield_ptr;
DBUG_ENTER("open_table_from_share");
DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str,
share->table_name.str, (long) outparam));
@@ -1794,6 +2169,34 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
}
}
+ /*
+ Process virtual columns, if any.
+ */
+ if (!(vfield_ptr = (Field **) alloc_root(&outparam->mem_root,
+ (uint) ((share->vfields+1)*
+ sizeof(Field*)))))
+ goto err;
+
+ outparam->vfield= vfield_ptr;
+
+ for (field_ptr= outparam->field; *field_ptr; field_ptr++)
+ {
+ if ((*field_ptr)->vcol_info)
+ {
+ if (unpack_vcol_info_from_frm(thd,
+ outparam,
+ *field_ptr,
+ &(*field_ptr)->vcol_info->expr_str,
+ &error_reported))
+ {
+ error= 4; // in case no error is reported
+ goto err;
+ }
+ *(vfield_ptr++)= *field_ptr;
+ }
+ }
+ *vfield_ptr= 0; // End marker
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (share->partition_info_len && outparam->file)
{
@@ -1861,10 +2264,24 @@ partititon_err:
}
#endif
+ /* Check virtual columns against table's storage engine. */
+ if (share->vfields &&
+ ((outparam->file &&
+ !outparam->file->check_if_supported_virtual_columns()) ||
+ (!outparam->file && share->db_type() &&
+ share->db_type()->db_type == DB_TYPE_CSV_DB))) // Workaround for CSV
+ {
+ my_error(ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN,
+ MYF(0),
+ "Specified storage engine");
+ error_reported= TRUE;
+ goto err;
+ }
+
/* Allocate bitmaps */
bitmap_size= share->column_bitmap_size;
- if (!(bitmaps= (uchar*) alloc_root(&outparam->mem_root, bitmap_size*3)))
+ if (!(bitmaps= (uchar*) alloc_root(&outparam->mem_root, bitmap_size*4)))
goto err;
bitmap_init(&outparam->def_read_set,
(my_bitmap_map*) bitmaps, share->fields, FALSE);
@@ -1872,6 +2289,8 @@ partititon_err:
(my_bitmap_map*) (bitmaps+bitmap_size), share->fields, FALSE);
bitmap_init(&outparam->tmp_set,
(my_bitmap_map*) (bitmaps+bitmap_size*2), share->fields, FALSE);
+ bitmap_init(&outparam->vcol_set,
+ (my_bitmap_map*) (bitmaps+bitmap_size*3), share->fields, FALSE);
outparam->default_column_bitmaps();
/* The table struct is now initialized; Open the table */
@@ -1975,7 +2394,11 @@ int closefrm(register TABLE *table, bool free_share)
if (table->field)
{
for (Field **ptr=table->field ; *ptr ; ptr++)
+ {
+ if ((*ptr)->vcol_info)
+ free_items((*ptr)->vcol_info->item_free_list);
delete *ptr;
+ }
table->field= 0;
}
delete table->file;
@@ -4319,6 +4742,7 @@ void st_table::clear_column_bitmaps()
bitmap_clear_all(&table->def_write_set);
*/
bzero((char*) def_read_set.bitmap, s->column_bitmap_size*2);
+ bzero((char*) def_read_set.bitmap, s->column_bitmap_size*4);
column_bitmaps_set(&def_read_set, &def_write_set);
}
@@ -4404,7 +4828,14 @@ void st_table::mark_columns_used_by_index_no_reset(uint index,
KEY_PART_INFO *key_part_end= (key_part +
key_info[index].key_parts);
for (;key_part != key_part_end; key_part++)
+ {
bitmap_set_bit(bitmap, key_part->fieldnr-1);
+ if (key_part->field->vcol_info &&
+ key_part->field->vcol_info->expr_item)
+ key_part->field->vcol_info->
+ expr_item->walk(&Item::register_field_in_bitmap,
+ 1, (uchar *) bitmap);
+ }
}
@@ -4531,6 +4962,8 @@ void st_table::mark_columns_needed_for_update()
file->column_bitmaps_signal();
}
}
+ /* Mark all virtual columns needed for update */
+ mark_virtual_columns_for_write();
DBUG_VOID_RETURN;
}
@@ -4557,9 +4990,100 @@ void st_table::mark_columns_needed_for_insert()
}
if (found_next_number_field)
mark_auto_increment_column();
+ /* Mark virtual columns for insert */
+ mark_virtual_columns_for_write();
}
+/*
+  @brief Mark a virtual column as used by the query
+
+ @param field the field for the column to be marked
+
+ @details
+ The function marks the column for 'field' as virtual (computed)
+ in the bitmap vcol_set.
+    If the column is marked for the first time, the expression used to compute
+    the column is traversed and all columns occurring in it are
+    marked in the read_set of the table.
+
+ @retval
+ TRUE if column is marked for the first time
+ @retval
+ FALSE otherwise
+*/
+
+bool st_table::mark_virtual_col(Field *field)
+{
+ bool res;
+ DBUG_ASSERT(field->vcol_info);
+ if (!(res= bitmap_fast_test_and_set(&vcol_set, field->field_index)))
+ {
+ Item *vcol_item= field->vcol_info->expr_item;
+ DBUG_ASSERT(vcol_item);
+ vcol_item->walk(&Item::register_field_in_read_map, 1, (uchar *) 0);
+ }
+ return res;
+}
+
+
+/*
+ @brief Mark virtual columns for update/insert commands
+
+ @details
+    The function marks the virtual columns used in update/insert commands
+    in the vcol_set bitmap.
+    If a virtual column is in write_set it is always marked in vcol_set.
+    If a stored virtual column is not in write_set but is computed
+    from columns that are in write_set, it is also marked in vcol_set
+    and, besides, added to write_set.
+
+ @return void
+
+ @note
+ Let table t1 have columns a,b,c and let column c be a stored virtual
+ column computed through columns a and b. Then for the query
+ UPDATE t1 SET a=1
+ column c will be placed into vcol_set and into write_set while
+ column b will be placed into read_set.
+ If column c was a virtual column, but not a stored virtual column
+ then it would not be added to any of the sets. Column b would not
+ be added to read_set either.
+*/
+
+void st_table::mark_virtual_columns_for_write(void)
+{
+ Field **vfield_ptr, *tmp_vfield;
+ bool bitmap_updated= FALSE;
+
+ for (vfield_ptr= vfield; *vfield_ptr; vfield_ptr++)
+ {
+ tmp_vfield= *vfield_ptr;
+ if (bitmap_is_set(write_set, tmp_vfield->field_index))
+ bitmap_updated= mark_virtual_col(tmp_vfield);
+ else if (tmp_vfield->stored_in_db)
+ {
+ MY_BITMAP *save_read_set;
+ Item *vcol_item= tmp_vfield->vcol_info->expr_item;
+ DBUG_ASSERT(vcol_item);
+ bitmap_clear_all(&tmp_set);
+ save_read_set= read_set;
+ read_set= &tmp_set;
+ vcol_item->walk(&Item::register_field_in_read_map, 1, (uchar *) 0);
+ read_set= save_read_set;
+ bitmap_intersect(&tmp_set, write_set);
+ if (!bitmap_is_clear_all(&tmp_set))
+ {
+ bitmap_set_bit(write_set, tmp_vfield->field_index);
+ mark_virtual_col(tmp_vfield);
+ bitmap_updated= TRUE;
+ }
+ }
+ }
+ if (bitmap_updated)
+ file->column_bitmaps_signal();
+}
+
/**
@brief Check if this is part of a MERGE table with attached children.
@@ -4567,7 +5091,7 @@ void st_table::mark_columns_needed_for_insert()
@retval TRUE children are attached
@retval FALSE no MERGE part or children not attached
- @detail
+ @details
A MERGE table consists of a parent TABLE and zero or more child
TABLEs. Each of these TABLEs is called a part of a MERGE table.
*/
@@ -4828,6 +5352,54 @@ size_t max_row_length(TABLE *table, const uchar *data)
return length;
}
+/*
+ @brief Compute values for virtual columns used in query
+
+ @param table The TABLE object
+  @param  for_write   TRUE if stored virtual fields must be computed as well (row insert/update)
+
+ @details
+ The function computes the values of the virtual columns of the table and
+ stores them in the table record buffer.
+ Only fields from vcol_set are computed, and, when the flag for_write is not
+ set to TRUE, a virtual field is computed only if it's not stored.
+ The flag for_write is set to TRUE for row insert/update operations.
+
+ @retval
+ 0 Success
+ @retval
+ >0 Error occurred when storing a virtual field value
+*/
+
+int update_virtual_fields(TABLE *table, bool for_write)
+{
+ DBUG_ENTER("update_virtual_fields");
+ Field **vfield_ptr, *vfield;
+ int error= 0;
+ if (!table || !table->vfield)
+ DBUG_RETURN(0);
+
+ /* Iterate over virtual fields in the table */
+ for (vfield_ptr= table->vfield; *vfield_ptr; vfield_ptr++)
+ {
+ vfield= (*vfield_ptr);
+ DBUG_ASSERT(vfield->vcol_info && vfield->vcol_info->expr_item);
+ /* Only update those fields that are marked in the vcol_set bitmap */
+ if (bitmap_is_set(&table->vcol_set, vfield->field_index) &&
+ (for_write || !vfield->stored_in_db))
+ {
+ /* Compute the actual value of the virtual fields */
+ error= vfield->vcol_info->expr_item->save_in_field(vfield, 0);
+ DBUG_PRINT("info", ("field '%s' - updated", vfield->field_name));
+ }
+ else
+ {
+ DBUG_PRINT("info", ("field '%s' - skipped", vfield->field_name));
+ }
+ }
+  DBUG_RETURN(error);
+}
+
/*****************************************************************************
** Instansiate templates
*****************************************************************************/
diff --git a/sql/table.h b/sql/table.h
index acf1d797b69..7315e48718b 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -352,6 +352,8 @@ typedef struct st_table_share
ulong version, mysql_version;
ulong timestamp_offset; /* Set to offset+1 of record */
ulong reclength; /* Recordlength */
+ /* Stored record length. No generated-only virtual fields are included */
+ ulong stored_rec_length;
plugin_ref db_plugin; /* storage engine plugin */
inline handlerton *db_type() const /* table_type for handler */
@@ -372,6 +374,8 @@ typedef struct st_table_share
uint key_block_size; /* create key_block_size, if used */
uint null_bytes, last_null_bit_pos;
uint fields; /* Number of fields */
+ /* Number of stored fields, generated-only virtual fields are not included */
+ uint stored_fields;
uint rec_buff_length; /* Size of table->record[] buffer */
uint keys, key_parts;
uint max_key_length, max_unique_length, total_key_length;
@@ -393,6 +397,7 @@ typedef struct st_table_share
uint error, open_errno, errarg; /* error from open_table_def() */
uint column_bitmap_size;
uchar frm_version;
+ uint vfields; /* Number of computed (virtual) fields */
bool null_field_first;
bool system; /* Set if system table (one record) */
bool crypted; /* If .frm file is crypted */
@@ -657,6 +662,7 @@ struct st_table {
Field *next_number_field; /* Set if next_number is activated */
Field *found_next_number_field; /* Set on open */
Field_timestamp *timestamp_field;
+ Field **vfield; /* Pointer to virtual fields*/
/* Table's triggers, 0 if there are no of them */
Table_triggers_list *triggers;
@@ -666,6 +672,7 @@ struct st_table {
uchar *null_flags;
my_bitmap_map *bitmap_init_value;
MY_BITMAP def_read_set, def_write_set, tmp_set; /* containers */
+ MY_BITMAP vcol_set; /* set of used virtual columns */
MY_BITMAP *read_set, *write_set; /* Active column sets */
/*
The ID of the query that opened and is using this table. Has different
@@ -831,6 +838,8 @@ struct st_table {
void mark_columns_needed_for_update(void);
void mark_columns_needed_for_delete(void);
void mark_columns_needed_for_insert(void);
+ bool mark_virtual_col(Field *field);
+ void mark_virtual_columns_for_write(void);
inline void column_bitmaps_set(MY_BITMAP *read_set_arg,
MY_BITMAP *write_set_arg)
{
@@ -890,6 +899,7 @@ typedef struct st_foreign_key_info
enum enum_schema_tables
{
SCH_CHARSETS= 0,
+ SCH_CLIENT_STATS,
SCH_COLLATIONS,
SCH_COLLATION_CHARACTER_SET_APPLICABILITY,
SCH_COLUMNS,
@@ -899,6 +909,7 @@ enum enum_schema_tables
SCH_FILES,
SCH_GLOBAL_STATUS,
SCH_GLOBAL_VARIABLES,
+ SCH_INDEX_STATS,
SCH_KEY_COLUMN_USAGE,
SCH_OPEN_TABLES,
SCH_PARTITIONS,
@@ -917,8 +928,10 @@ enum enum_schema_tables
SCH_TABLE_CONSTRAINTS,
SCH_TABLE_NAMES,
SCH_TABLE_PRIVILEGES,
+ SCH_TABLE_STATS,
SCH_TRIGGERS,
SCH_USER_PRIVILEGES,
+ SCH_USER_STATS,
SCH_VARIABLES,
SCH_VIEWS
};
diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc
index 0764fe8be33..83c4a8ee2a0 100644
--- a/sql/thr_malloc.cc
+++ b/sql/thr_malloc.cc
@@ -59,11 +59,13 @@ void init_sql_alloc(MEM_ROOT *mem_root, uint block_size, uint pre_alloc)
}
+#ifndef MYSQL_CLIENT
void *sql_alloc(size_t Size)
{
MEM_ROOT *root= *my_pthread_getspecific_ptr(MEM_ROOT**,THR_MALLOC);
return alloc_root(root,Size);
}
+#endif
void *sql_calloc(size_t size)
diff --git a/sql/tztime.cc b/sql/tztime.cc
index cd6e63be039..3802c284057 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1676,7 +1676,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
tz_leapcnt= 0;
- res= table->file->index_first(table->record[0]);
+ res= table->file->ha_index_first(table->record[0]);
while (!res)
{
@@ -1698,7 +1698,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans,
tz_lsis[tz_leapcnt-1].ls_corr));
- res= table->file->index_next(table->record[0]);
+ res= table->file->ha_index_next(table->record[0]);
}
(void)table->file->ha_index_end();
@@ -1865,8 +1865,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
*/
(void)table->file->ha_index_init(0, 1);
- if (table->file->index_read_map(table->record[0], table->field[0]->ptr,
- HA_WHOLE_KEY, HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_map(table->record[0], table->field[0]->ptr,
+ HA_WHOLE_KEY, HA_READ_KEY_EXACT))
{
#ifdef EXTRA_DEBUG
/*
@@ -1893,8 +1893,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
table->field[0]->store((longlong) tzid, TRUE);
(void)table->file->ha_index_init(0, 1);
- if (table->file->index_read_map(table->record[0], table->field[0]->ptr,
- HA_WHOLE_KEY, HA_READ_KEY_EXACT))
+ if (table->file->ha_index_read_map(table->record[0], table->field[0]->ptr,
+ HA_WHOLE_KEY, HA_READ_KEY_EXACT))
{
sql_print_error("Can't find description of time zone '%u'", tzid);
goto end;
@@ -1920,8 +1920,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
table->field[0]->store((longlong) tzid, TRUE);
(void)table->file->ha_index_init(0, 1);
- res= table->file->index_read_map(table->record[0], table->field[0]->ptr,
- (key_part_map)1, HA_READ_KEY_EXACT);
+ res= table->file->ha_index_read_map(table->record[0], table->field[0]->ptr,
+ (key_part_map)1, HA_READ_KEY_EXACT);
while (!res)
{
ttid= (uint)table->field[1]->val_int();
@@ -1968,8 +1968,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
tmp_tz_info.typecnt= ttid + 1;
- res= table->file->index_next_same(table->record[0],
- table->field[0]->ptr, 4);
+ res= table->file->ha_index_next_same(table->record[0],
+ table->field[0]->ptr, 4);
}
if (res != HA_ERR_END_OF_FILE)
@@ -1991,8 +1991,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
table->field[0]->store((longlong) tzid, TRUE);
(void)table->file->ha_index_init(0, 1);
- res= table->file->index_read_map(table->record[0], table->field[0]->ptr,
- (key_part_map)1, HA_READ_KEY_EXACT);
+ res= table->file->ha_index_read_map(table->record[0], table->field[0]->ptr,
+ (key_part_map)1, HA_READ_KEY_EXACT);
while (!res)
{
ttime= (my_time_t)table->field[1]->val_int();
@@ -2021,8 +2021,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
("time_zone_transition table: tz_id: %u tt_time: %lu tt_id: %u",
tzid, (ulong) ttime, ttid));
- res= table->file->index_next_same(table->record[0],
- table->field[0]->ptr, 4);
+ res= table->file->ha_index_next_same(table->record[0],
+ table->field[0]->ptr, 4);
}
/*
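Every tztime.cc change in this hunk is the same mechanical substitution: direct calls to the handler's index_first()/index_next()/index_read_map()/index_next_same() virtuals become calls to ha_-prefixed wrappers. The diff itself does not show what those wrappers add, so the sketch below only illustrates the general pattern — a public non-virtual wrapper doing shared bookkeeping around a protected engine virtual — with an invented rows_read counter and a toy engine:

#include <cstdio>

// Simplified stand-in for the storage engine interface in sql/handler.h.
class handler {
public:
  unsigned rows_read = 0;     // invented bookkeeping counter for this sketch

  // Public non-virtual wrappers: one place to hang statistics, checks, etc.
  int ha_index_first(unsigned char *buf) {
    int res = index_first(buf);
    if (res == 0) rows_read++;
    return res;
  }
  int ha_index_next(unsigned char *buf) {
    int res = index_next(buf);
    if (res == 0) rows_read++;
    return res;
  }
  virtual ~handler() = default;

protected:
  // Engine-specific virtuals that concrete handlers override.
  virtual int index_first(unsigned char *buf) = 0;
  virtual int index_next(unsigned char *buf) = 0;
};

// Toy engine that returns two rows and then a nonzero "end of scan" code.
class toy_handler : public handler {
  int remaining = 2;
protected:
  int index_first(unsigned char *) override { remaining = 1; return 0; }
  int index_next(unsigned char *) override { return remaining-- > 0 ? 0 : 137; }
};

int main() {
  toy_handler h;
  unsigned char rec[1] = {0};
  for (int res = h.ha_index_first(rec); res == 0; res = h.ha_index_next(rec)) {}
  std::printf("rows read through wrappers: %u\n", h.rows_read);
  return 0;
}

Calling code such as tz_load_from_open_tables() stays unchanged apart from the ha_ prefix, which is exactly what the hunks above show.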
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 7087ec93804..78d375e663a 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -583,7 +583,7 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
{
uint length,int_count,int_length,no_empty, int_parts;
uint time_stamp_pos,null_fields;
- ulong reclength, totlength, n_length, com_length;
+ ulong reclength, totlength, n_length, com_length, vcol_info_length;
DBUG_ENTER("pack_header");
if (create_fields.elements > MAX_FIELDS)
@@ -594,8 +594,8 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
totlength= 0L;
reclength= data_offset;
- no_empty=int_count=int_parts=int_length=time_stamp_pos=null_fields=
- com_length=0;
+ no_empty=int_count=int_parts=int_length=time_stamp_pos=null_fields=0;
+ com_length=vcol_info_length=0;
n_length=2L;
/* Check fields */
@@ -623,6 +623,30 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
field->field_name, tmp_len);
field->comment.length= tmp_len;
}
+ if (field->vcol_info)
+ {
+ tmp_len=
+ system_charset_info->cset->charpos(system_charset_info,
+ field->vcol_info->expr_str.str,
+ field->vcol_info->expr_str.str +
+ field->vcol_info->expr_str.length,
+ VIRTUAL_COLUMN_EXPRESSION_MAXLEN);
+
+ if (tmp_len < field->vcol_info->expr_str.length)
+ {
+ my_error(ER_WRONG_STRING_LENGTH, MYF(0),
+ field->vcol_info->expr_str.str,"VIRTUAL COLUMN EXPRESSION",
+ (uint) VIRTUAL_COLUMN_EXPRESSION_MAXLEN);
+ DBUG_RETURN(1);
+ }
+ /*
+ Add the length of the expression string plus the mandatory header
+ to the total size of the metadata on defining expressions that is
+ saved in the .frm file for virtual columns.
+ */
+ vcol_info_length+= field->vcol_info->expr_str.length+
+ (uint)FRM_VCOL_HEADER_SIZE;
+ }
totlength+= field->length;
com_length+= field->comment.length;
@@ -642,8 +666,6 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
!time_stamp_pos)
time_stamp_pos= (uint) field->offset+ (uint) data_offset + 1;
length=field->pack_length;
- /* Ensure we don't have any bugs when generating offsets */
- DBUG_ASSERT(reclength == field->offset + data_offset);
if ((uint) field->offset+ (uint) data_offset+ length > reclength)
reclength=(uint) (field->offset+ data_offset + length);
n_length+= (ulong) strlen(field->field_name)+1;
@@ -710,7 +732,8 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
/* Hack to avoid bugs with small static rows in MySQL */
reclength=max(file->min_record_length(table_options),reclength);
if (info_length+(ulong) create_fields.elements*FCOMP+288+
- n_length+int_length+com_length > 65535L || int_count > 255)
+ n_length+int_length+com_length+vcol_info_length > 65535L ||
+ int_count > 255)
{
my_message(ER_TOO_MANY_FIELDS, ER(ER_TOO_MANY_FIELDS), MYF(0));
DBUG_RETURN(1);
@@ -718,7 +741,7 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
bzero((char*)forminfo,288);
length=(info_length+create_fields.elements*FCOMP+288+n_length+int_length+
- com_length);
+ com_length+vcol_info_length);
int2store(forminfo,length);
forminfo[256] = (uint8) screens;
int2store(forminfo+258,create_fields.elements);
@@ -735,7 +758,8 @@ static bool pack_header(uchar *forminfo, enum legacy_db_type table_type,
int2store(forminfo+280,22); /* Rows needed */
int2store(forminfo+282,null_fields);
int2store(forminfo+284,com_length);
- /* Up to forminfo+288 is free to use for additional information */
+ int2store(forminfo+286,vcol_info_length);
+ /* forminfo+288 is free to use for additional information */
DBUG_RETURN(0);
} /* pack_header */
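In pack_header() above, vcol_info_length accumulates one FRM_VCOL_HEADER_SIZE-byte header plus the expression text per virtual column, joins the existing 65535-byte limit check on the forminfo section, and is finally stored as a 2-byte value at forminfo+286. A self-contained sketch of that accounting, with a local little-endian stand-in for int2store() and made-up example expressions:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Matches the new unireg.h constant: 3 header bytes per virtual column.
static const unsigned FRM_VCOL_HEADER_SIZE = 3;

// Stand-in for int2store(): write a 16-bit value in little-endian order.
static void int2store(unsigned char *p, uint16_t v) {
  p[0] = (unsigned char)(v & 0xff);
  p[1] = (unsigned char)(v >> 8);
}

int main() {
  // Defining expressions of the table's virtual columns (illustrative only).
  std::vector<std::string> vcol_exprs = {"a + b", "concat(a, '-', b)"};

  unsigned long vcol_info_length = 0;
  for (const std::string &expr : vcol_exprs)
    vcol_info_length += expr.size() + FRM_VCOL_HEADER_SIZE;

  // The real check also adds n_length, int_length, com_length and so on;
  // only the virtual-column contribution is shown here.
  if (vcol_info_length > 65535UL) {
    std::fprintf(stderr, "too much metadata for one forminfo section\n");
    return 1;
  }

  unsigned char forminfo[288];
  std::memset(forminfo, 0, sizeof(forminfo));
  int2store(forminfo + 286, (uint16_t)vcol_info_length);  // as in pack_header()

  std::printf("vcol_info_length = %lu\n", vcol_info_length);
  return 0;
}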
@@ -774,7 +798,7 @@ static bool pack_fields(File file, List<Create_field> &create_fields,
ulong data_offset)
{
reg2 uint i;
- uint int_count, comment_length=0;
+ uint int_count, comment_length= 0, vcol_info_length=0;
uchar buff[MAX_FIELD_WIDTH];
Create_field *field;
DBUG_ENTER("pack_fields");
@@ -787,6 +811,7 @@ static bool pack_fields(File file, List<Create_field> &create_fields,
while ((field=it++))
{
uint recpos;
+ uint cur_vcol_expr_len= 0;
buff[0]= (uchar) field->row;
buff[1]= (uchar) field->col;
buff[2]= (uchar) field->sc_length;
@@ -809,6 +834,17 @@ static bool pack_fields(File file, List<Create_field> &create_fields,
buff[14]= (uchar) field->charset->number;
else
buff[14]= 0; // Numerical
+ if (field->vcol_info)
+ {
+ /*
+ Reuse the interval_id slot in the .frm file to store the length of
+ the additional data saved for this virtual field.
+ */
+ buff[12]= cur_vcol_expr_len= field->vcol_info->expr_str.length +
+ (uint)FRM_VCOL_HEADER_SIZE;
+ vcol_info_length+= cur_vcol_expr_len+(uint)FRM_VCOL_HEADER_SIZE;
+ buff[13]= (uchar) MYSQL_TYPE_VIRTUAL;
+ }
int2store(buff+15, field->comment.length);
comment_length+= field->comment.length;
set_if_bigger(int_count,field->interval_id);
@@ -903,6 +939,34 @@ static bool pack_fields(File file, List<Create_field> &create_fields,
DBUG_RETURN(1);
}
}
+ if (vcol_info_length)
+ {
+ it.rewind();
+ int_count=0;
+ while ((field=it++))
+ {
+ /*
+ Pack each virtual field as follows:
+ byte 1 = 1 (always 1 to allow for future extensions)
+ byte 2 = sql_type
+ byte 3 = flags (currently: 0 = no flags, 1 = field is also physically stored)
+ byte 4-... = virtual column expression (text data)
+ */
+ if (field->vcol_info && field->vcol_info->expr_str.length)
+ {
+ buff[0]= (uchar)1;
+ buff[1]= (uchar) field->sql_type;
+ buff[2]= (uchar) field->stored_in_db;
+ if (my_write(file, buff, 3, MYF_RW))
+ DBUG_RETURN(1);
+ if (my_write(file,
+ (uchar*) field->vcol_info->expr_str.str,
+ field->vcol_info->expr_str.length,
+ MYF_RW))
+ DBUG_RETURN(1);
+ }
+ }
+ }
DBUG_RETURN(0);
}
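The second loop added to pack_fields() writes, per virtual column, a three-byte header (the constant 1, the sql_type, and the stored_in_db flag) followed by the expression text. A standalone sketch of that record layout follows; the read-back at the end is an assumption for illustration, since the corresponding unpacking code is not part of this hunk:

#include <cstdio>
#include <string>
#include <vector>

static const unsigned char FRM_VCOL_FORMAT_VERSION = 1;  // the "always 1" first byte

struct vcol_block {
  unsigned char sql_type;     // storage type of the computed value
  bool stored_in_db;          // true if the column is also physically stored
  std::string expr;           // defining expression, stored as plain text
};

// Append one virtual-column block in the layout described in pack_fields().
static void pack_vcol(std::vector<unsigned char> &out, const vcol_block &v) {
  out.push_back(FRM_VCOL_FORMAT_VERSION);
  out.push_back(v.sql_type);
  out.push_back(v.stored_in_db ? 1 : 0);
  out.insert(out.end(), v.expr.begin(), v.expr.end());
}

int main() {
  std::vector<unsigned char> frm_tail;
  pack_vcol(frm_tail, {/*sql_type=*/3, /*stored_in_db=*/false, "a * b"});

  // Reading it back: the 3 header bytes, then expression bytes up to the
  // length that pack_fields() records elsewhere (buff[12] in the field record).
  std::printf("version=%u type=%u stored=%u expr=%s\n",
              (unsigned)frm_tail[0], (unsigned)frm_tail[1],
              (unsigned)frm_tail[2],
              std::string(frm_tail.begin() + 3, frm_tail.end()).c_str());
  return 0;
}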
diff --git a/sql/unireg.h b/sql/unireg.h
index 4fbde3b8631..ec4ecf6a10f 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -212,6 +212,13 @@
#define DEFAULT_KEY_CACHE_NAME "default"
+/* The length of the header part for each virtual column in the .frm file */
+#define FRM_VCOL_HEADER_SIZE 3
+
+/* Maximum length of the defining expression for a virtual column */
+#define VIRTUAL_COLUMN_EXPRESSION_MAXLEN (255 - FRM_VCOL_HEADER_SIZE)
+
+
/* Include prototypes for unireg */
#include "mysqld_error.h"