Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc          |  47
-rw-r--r--  sql/field.h           |   8
-rw-r--r--  sql/field_conv.cc     |   3
-rw-r--r--  sql/ha_berkeley.cc    |   8
-rw-r--r--  sql/ha_heap.cc        |   8
-rw-r--r--  sql/ha_innodb.cc      | 148
-rw-r--r--  sql/ha_innodb.h       |   4
-rw-r--r--  sql/ha_isam.cc        |   8
-rw-r--r--  sql/ha_isammrg.cc     |   4
-rw-r--r--  sql/ha_myisam.cc      |   8
-rw-r--r--  sql/ha_myisammrg.cc   |   8
-rw-r--r--  sql/ha_ndbcluster.cc  |   9
-rw-r--r--  sql/ha_ndbcluster.h   |   3
-rw-r--r--  sql/handler.cc        |  16
-rw-r--r--  sql/handler.h         |   1
-rw-r--r--  sql/item.cc           |  16
-rw-r--r--  sql/item_timefunc.cc  |   2
-rw-r--r--  sql/mysql_priv.h      |   4
-rw-r--r--  sql/mysqld.cc         |  16
-rw-r--r--  sql/set_var.cc        |   8
-rw-r--r--  sql/sql_acl.cc        |  32
-rw-r--r--  sql/sql_base.cc       |  45
-rw-r--r--  sql/sql_insert.cc     |  24
-rw-r--r--  sql/sql_load.cc       |   2
-rw-r--r--  sql/sql_parse.cc      |  12
-rw-r--r--  sql/sql_select.cc     |  40
-rw-r--r--  sql/sql_show.cc       |  62
-rw-r--r--  sql/sql_table.cc      |   8
-rw-r--r--  sql/sql_update.cc     |   4
-rw-r--r--  sql/sql_yacc.yy       |  15
-rw-r--r--  sql/table.h           |  31
31 files changed, 396 insertions(+), 208 deletions(-)
diff --git a/sql/field.cc b/sql/field.cc
index a3a19d93e58..eaa1ca2bcca 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -2932,11 +2932,12 @@ void Field_double::sql_type(String &res) const
*/
Field_timestamp::Field_timestamp(char *ptr_arg, uint32 len_arg,
+ uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg,
const char *field_name_arg,
struct st_table *table_arg,
CHARSET_INFO *cs)
- :Field_str(ptr_arg, 19, (uchar*) 0,0,
+ :Field_str(ptr_arg, 19, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, table_arg, cs)
{
/* For 4.0 MYD and 4.0 InnoDB compatibility */
@@ -2952,23 +2953,33 @@ Field_timestamp::Field_timestamp(char *ptr_arg, uint32 len_arg,
/*
- Sets TABLE::timestamp_default_now and TABLE::timestamp_on_update_now
- members according to unireg type of this TIMESTAMP field.
-
+ Get auto-set type for TIMESTAMP field.
+
SYNOPSIS
- Field_timestamp::set_timestamp_offsets()
-
+ get_auto_set_type()
+
+ DESCRIPTION
+ Returns a value indicating during which operations this TIMESTAMP field
+ should be auto-set to the current timestamp.
*/
-void Field_timestamp::set_timestamp_offsets()
+timestamp_auto_set_type Field_timestamp::get_auto_set_type() const
{
- ulong timestamp= (ulong) (ptr - (char*) table->record[0]) + 1;
-
- DBUG_ASSERT(table->timestamp_field == this && unireg_check != NONE);
-
- table->timestamp_default_now=
- (unireg_check == TIMESTAMP_UN_FIELD)? 0 : timestamp;
- table->timestamp_on_update_now=
- (unireg_check == TIMESTAMP_DN_FIELD)? 0 : timestamp;
+ switch (unireg_check)
+ {
+ case TIMESTAMP_DN_FIELD:
+ return TIMESTAMP_AUTO_SET_ON_INSERT;
+ case TIMESTAMP_UN_FIELD:
+ return TIMESTAMP_AUTO_SET_ON_UPDATE;
+ case TIMESTAMP_DNUN_FIELD:
+ return TIMESTAMP_AUTO_SET_ON_BOTH;
+ default:
+ /*
+ Normally this function should not be called for TIMESTAMPs without
+ an auto-set property.
+ */
+ DBUG_ASSERT(0);
+ return TIMESTAMP_NO_AUTO_SET;
+ }
}
@@ -3267,6 +3278,7 @@ void Field_timestamp::sql_type(String &res) const
void Field_timestamp::set_time()
{
long tmp= (long) table->in_use->query_start();
+ set_notnull();
#ifdef WORDS_BIGENDIAN
if (table->db_low_byte_first)
{
@@ -5985,8 +5997,9 @@ Field *make_field(char *ptr, uint32 field_length,
f_is_zerofill(pack_flag) != 0,
f_is_dec(pack_flag) == 0);
case FIELD_TYPE_TIMESTAMP:
- return new Field_timestamp(ptr,field_length,
- unireg_check, field_name, table, field_charset);
+ return new Field_timestamp(ptr,field_length, null_pos, null_bit,
+ unireg_check, field_name, table,
+ field_charset);
case FIELD_TYPE_YEAR:
return new Field_year(ptr,field_length,null_pos,null_bit,
unireg_check, field_name, table);
diff --git a/sql/field.h b/sql/field.h
index 3d22c6904a7..69410f4e6af 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -683,6 +683,7 @@ public:
class Field_timestamp :public Field_str {
public:
Field_timestamp(char *ptr_arg, uint32 len_arg,
+ uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const char *field_name_arg,
struct st_table *table_arg,
CHARSET_INFO *cs);
@@ -712,8 +713,11 @@ public:
else
Field::set_default();
}
- inline long get_timestamp()
+ /* Get TIMESTAMP field value as seconds since the beginning of the Unix Epoch */
+ inline long get_timestamp(my_bool *null_value)
{
+ if ((*null_value= is_null()))
+ return 0;
#ifdef WORDS_BIGENDIAN
if (table->db_low_byte_first)
return sint4korr(ptr);
@@ -725,7 +729,7 @@ public:
bool get_date(TIME *ltime,uint fuzzydate);
bool get_time(TIME *ltime);
field_cast_enum field_cast_type() { return FIELD_CAST_TIMESTAMP; }
- void set_timestamp_offsets();
+ timestamp_auto_set_type get_auto_set_type() const;
};
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index d7993939092..c9b21b5f96f 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -164,7 +164,8 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions)
/*
Check if this is a special type, which will get a special value
- when set to NULL
+ when set to NULL (TIMESTAMP fields which allow setting to NULL
+ are handled by the first check).
*/
if (field->type() == FIELD_TYPE_TIMESTAMP)
{
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 32f623b86c9..c688f3c3597 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -856,8 +856,8 @@ int ha_berkeley::write_row(byte * record)
DBUG_ENTER("write_row");
statistic_increment(ha_write_count,&LOCK_status);
- if (table->timestamp_default_now)
- update_timestamp(record+table->timestamp_default_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
if (table->next_number_field && record == table->record[0])
update_auto_increment();
if ((error=pack_row(&row, record,1)))
@@ -1103,8 +1103,8 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
LINT_INIT(error);
statistic_increment(ha_update_count,&LOCK_status);
- if (table->timestamp_on_update_now)
- update_timestamp(new_row+table->timestamp_on_update_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
if (hidden_primary_key)
{
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index d7327362286..5be51ec8494 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -87,8 +87,8 @@ void ha_heap::set_keys_for_scanning(void)
int ha_heap::write_row(byte * buf)
{
statistic_increment(ha_write_count,&LOCK_status);
- if (table->timestamp_default_now)
- update_timestamp(buf+table->timestamp_default_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])
update_auto_increment();
return heap_write(file,buf);
@@ -97,8 +97,8 @@ int ha_heap::write_row(byte * buf)
int ha_heap::update_row(const byte * old_data, byte * new_data)
{
statistic_increment(ha_update_count,&LOCK_status);
- if (table->timestamp_on_update_now)
- update_timestamp(new_data+table->timestamp_on_update_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
return heap_update(file,old_data,new_data);
}
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index bf1be6f5d7e..132bb835d82 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -41,6 +41,7 @@ have disables the InnoDB inlining in this file. */
#include <hash.h>
#include <myisampack.h>
#include <mysys_err.h>
+#include <my_sys.h>
#define MAX_ULONG_BIT ((ulong) 1 << (sizeof(ulong)*8-1))
@@ -97,7 +98,7 @@ long innobase_mirrored_log_groups, innobase_log_files_in_group,
innobase_buffer_pool_size, innobase_additional_mem_pool_size,
innobase_file_io_threads, innobase_lock_wait_timeout,
innobase_thread_concurrency, innobase_force_recovery,
- innobase_open_files, innobase_auto_extend_increment;
+ innobase_open_files;
/* The default values for the following char* start-up parameters
are determined in innobase_init below: */
@@ -736,15 +737,35 @@ innobase_invalidate_query_cache(
}
/*********************************************************************
-Get the quote character to be used in SQL identifiers. */
+Get the quote character to be used in SQL identifiers.
+This definition must match the one in innobase/ut/ut0ut.c! */
extern "C"
-char
-mysql_get_identifier_quote_char(void)
-/*=================================*/
+int
+mysql_get_identifier_quote_char(
+/*============================*/
/* out: quote character to be
- used in SQL identifiers */
+ used in SQL identifiers; EOF if none */
+ trx_t* trx, /* in: transaction */
+ const char* name, /* in: name to print */
+ ulint namelen)/* in: length of name */
+{
+ if (!trx || !trx->mysql_thd) {
+ return(EOF);
+ }
+ return(get_quote_char_for_identifier((THD*) trx->mysql_thd,
+ name, namelen));
+}
+
+/**************************************************************************
+Obtain a pointer to the MySQL THD object, as in current_thd(). This
+definition must match the one in sql/ha_innodb.cc! */
+extern "C"
+void*
+innobase_current_thd(void)
+/*======================*/
+ /* out: MySQL THD object */
{
- return '`';
+ return(current_thd);
}
/*********************************************************************
@@ -963,7 +984,6 @@ innobase_init(void)
srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog;
srv_max_n_open_files = (ulint) innobase_open_files;
- srv_auto_extend_increment = (ulint) innobase_auto_extend_increment;
srv_innodb_status = (ibool) innobase_create_status_file;
srv_print_verbose_log = mysql_embedded ? 0 : 1;
@@ -1484,12 +1504,14 @@ ha_innobase::open(
{
dict_table_t* ib_table;
char norm_name[1000];
+ THD* thd;
DBUG_ENTER("ha_innobase::open");
UT_NOT_USED(mode);
UT_NOT_USED(test_if_locked);
+ thd = current_thd;
normalize_table_name(norm_name, name);
user_thd = NULL;
@@ -1539,7 +1561,7 @@ ha_innobase::open(
DBUG_RETURN(1);
}
- if (ib_table->ibd_file_missing && !current_thd->tablespace_op) {
+ if (ib_table->ibd_file_missing && !thd->tablespace_op) {
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB error:\n"
"MySQL is trying to open a table handle but the .ibd file for\n"
@@ -2218,8 +2240,8 @@ ha_innobase::write_row(
statistic_increment(ha_write_count, &LOCK_status);
- if (table->timestamp_default_now)
- update_timestamp(record + table->timestamp_default_now - 1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
if (last_query_id != user_thd->query_id) {
prebuilt->sql_stat_start = TRUE;
@@ -2590,8 +2612,8 @@ ha_innobase::update_row(
ut_ad(prebuilt->trx ==
(trx_t*) current_thd->transaction.all.innobase_tid);
- if (table->timestamp_on_update_now)
- update_timestamp(new_row + table->timestamp_on_update_now - 1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
if (last_query_id != user_thd->query_id) {
prebuilt->sql_stat_start = TRUE;
@@ -2863,7 +2885,7 @@ ha_innobase::index_read(
(ulint)upd_and_key_val_buff_len,
index,
(byte*) key_ptr,
- (ulint) key_len);
+ (ulint) key_len, prebuilt->trx);
} else {
/* We position the cursor to the last or the first entry
in the index */
@@ -4075,14 +4097,16 @@ ha_innobase::records_in_range(
index,
(byte*) (min_key ? min_key->key :
(const mysql_byte*) 0),
- (ulint) (min_key ? min_key->length : 0));
+ (ulint) (min_key ? min_key->length : 0),
+ prebuilt->trx);
row_sel_convert_mysql_key_to_innobase(
range_end, (byte*) key_val_buff2,
buff2_len, index,
(byte*) (max_key ? max_key->key :
(const mysql_byte*) 0),
- (ulint) (max_key ? max_key->length : 0));
+ (ulint) (max_key ? max_key->length : 0),
+ prebuilt->trx);
mode1 = convert_search_mode_to_innobase(min_key ? min_key->flag :
HA_READ_KEY_EXACT);
@@ -4469,7 +4493,8 @@ ha_innobase::update_table_comment(
(ulong) fsp_get_available_space_in_free_extents(
prebuilt->table->space));
- dict_print_info_on_foreign_keys(FALSE, file, prebuilt->table);
+ dict_print_info_on_foreign_keys(FALSE, file,
+ prebuilt->trx, prebuilt->table);
flen = ftell(file);
if(length + flen + 3 > 64000) {
flen = 64000 - 3 - length;
@@ -4535,7 +4560,8 @@ ha_innobase::get_foreign_key_create_info(void)
trx_search_latch_release_if_reserved(prebuilt->trx);
/* output the data to a temporary file */
- dict_print_info_on_foreign_keys(TRUE, file, prebuilt->table);
+ dict_print_info_on_foreign_keys(TRUE, file,
+ prebuilt->trx, prebuilt->table);
prebuilt->trx->op_info = (char*)"";
flen = ftell(file);
@@ -5232,8 +5258,7 @@ innobase_store_binlog_offset_and_flush_log(
/*=============================*/
char *binlog_name, /* in: binlog name */
longlong offset /* in: binlog offset */
-)
-{
+) {
mtr_t mtr;
assert(binlog_name != NULL);
@@ -5270,4 +5295,87 @@ ulonglong ha_innobase::get_mysql_bin_log_pos()
return trx_sys_mysql_bin_log_pos;
}
+extern "C" {
+/**********************************************************************
+This function is used to find the storage length in bytes of the first n
+characters for prefix indexes using a multibyte character set. The function
+finds charset information and returns length of prefix_len characters in the
+index field in bytes.
+
+NOTE: the prototype of this function is copied to data0type.c! If you change
+this function, you MUST change also data0type.c! */
+
+ulint
+innobase_get_at_most_n_mbchars(
+/*===========================*/
+ /* out: number of bytes occupied by the first
+ n characters */
+ ulint charset_id, /* in: character set id */
+ ulint prefix_len, /* in: prefix length in bytes of the index
+ (this has to be divided by mbmaxlen to get the
+ number of CHARACTERS n in the prefix) */
+ ulint data_len, /* in: length of the string in bytes */
+ const char* str) /* in: character string */
+{
+ ulint char_length; /* character length in bytes */
+ ulint n_chars; /* number of characters in prefix */
+ CHARSET_INFO* charset; /* charset used in the field */
+
+ charset = get_charset(charset_id, MYF(MY_WME));
+
+ ut_ad(charset);
+ ut_ad(charset->mbmaxlen);
+
+ /* Calculate how many characters at most the prefix index contains */
+
+ n_chars = prefix_len / charset->mbmaxlen;
+
+ /* If the charset is multi-byte, then we must find the length of the
+ first at most n chars in the string. If the string contains less
+ characters than n, then we return the length to the end of the last
+ full character. */
+
+ if (charset->mbmaxlen > 1) {
+/* ulint right_value; */
+
+ /* my_charpos() returns the byte length of the first n_chars
+ characters, or the end of the last full character */
+
+ char_length = my_charpos(charset, str,
+ str + data_len, n_chars);
+
+ /*################################################*/
+ /* TODO: my_charpos sometimes returns a non-sensical value
+ that is BIGGER than data_len: try to fix this bug partly with
+ these heuristics. This is NOT a complete bug fix! */
+
+ if (char_length > data_len) {
+ char_length = data_len;
+ }
+ /*################################################*/
+
+/* printf("data_len %lu, n_chars %lu, char_len %lu\n",
+ data_len, n_chars, char_length);
+ if (data_len < n_chars) {
+ right_value = data_len;
+ } else {
+ right_value = n_chars;
+ }
+
+ if (right_value != char_length) {
+ printf("ERRRRRROOORRRRRRRRRRRR!!!!!!!!!\n");
+ }
+*/
+ } else {
+ if (data_len < prefix_len) {
+ char_length = data_len;
+ } else {
+ char_length = prefix_len;
+ }
+ }
+
+ return(char_length);
+}
+}
+
#endif /* HAVE_INNOBASE_DB */
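
The hunk above explains how innobase_get_at_most_n_mbchars() limits a prefix index to the byte length of the first n multibyte characters. The following standalone sketch is not server code: it hard-codes UTF-8 instead of going through CHARSET_INFO/my_charpos, and utf8_char_len/at_most_n_mbchars are illustrative names, but it shows the same idea, including the guard against a result larger than data_len.

    // Standalone sketch of the prefix-length logic for a UTF-8 "charset".
    #include <cstddef>
    #include <cstdio>

    // Byte length of one UTF-8 character starting with byte c (assumes valid input).
    static size_t utf8_char_len(unsigned char c)
    {
        if (c < 0x80) return 1;
        if ((c & 0xE0) == 0xC0) return 2;
        if ((c & 0xF0) == 0xE0) return 3;
        return 4;
    }

    // Number of bytes occupied by at most n_chars characters of str, never
    // exceeding data_len (mirrors the "char_length > data_len" guard above).
    static size_t at_most_n_mbchars(const char *str, size_t data_len, size_t n_chars)
    {
        size_t pos = 0;
        while (n_chars-- > 0 && pos < data_len) {
            size_t len = utf8_char_len((unsigned char) str[pos]);
            if (pos + len > data_len)   /* truncated last character: stop here */
                break;
            pos += len;
        }
        return pos;
    }

    int main()
    {
        const char *s = "ab\xC3\xA9\xC3\xA9";          /* "abéé": 6 bytes, 4 chars */
        /* prefix_len = 9 bytes, mbmaxlen = 3  ->  n_chars = 3 */
        printf("%zu\n", at_most_n_mbchars(s, 6, 9 / 3)); /* prints 4, i.e. "abé" */
        return 0;
    }
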
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 0ef5f3ddffe..d72f5a58fe4 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -183,7 +183,6 @@ extern long innobase_buffer_pool_awe_mem_mb;
extern long innobase_file_io_threads, innobase_lock_wait_timeout;
extern long innobase_force_recovery, innobase_thread_concurrency;
extern long innobase_open_files;
-extern long innobase_auto_extend_increment;
extern char *innobase_data_home_dir, *innobase_data_file_path;
extern char *innobase_log_group_home_dir, *innobase_log_arch_dir;
extern char *innobase_unix_file_flush_method;
@@ -194,6 +193,8 @@ extern my_bool innobase_log_archive,
innobase_create_status_file;
extern "C" {
extern ulong srv_max_buf_pool_modified_pct;
+extern ulong srv_auto_extend_increment;
+extern ulong srv_max_purge_lag;
}
extern TYPELIB innobase_lock_typelib;
@@ -229,3 +230,4 @@ my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
void innobase_release_temporary_latches(void* innobase_tid);
void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset);
+
diff --git a/sql/ha_isam.cc b/sql/ha_isam.cc
index 85ab25a31d9..9de532fa7b0 100644
--- a/sql/ha_isam.cc
+++ b/sql/ha_isam.cc
@@ -70,8 +70,8 @@ uint ha_isam::min_record_length(uint options) const
int ha_isam::write_row(byte * buf)
{
statistic_increment(ha_write_count,&LOCK_status);
- if (table->timestamp_default_now)
- update_timestamp(buf+table->timestamp_default_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])
update_auto_increment();
return !nisam_write(file,buf) ? 0 : my_errno ? my_errno : -1;
@@ -80,8 +80,8 @@ int ha_isam::write_row(byte * buf)
int ha_isam::update_row(const byte * old_data, byte * new_data)
{
statistic_increment(ha_update_count,&LOCK_status);
- if (table->timestamp_on_update_now)
- update_timestamp(new_data+table->timestamp_on_update_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
return !nisam_update(file,old_data,new_data) ? 0 : my_errno ? my_errno : -1;
}
diff --git a/sql/ha_isammrg.cc b/sql/ha_isammrg.cc
index 20e2b4db423..367607eef19 100644
--- a/sql/ha_isammrg.cc
+++ b/sql/ha_isammrg.cc
@@ -78,8 +78,8 @@ int ha_isammrg::write_row(byte * buf)
int ha_isammrg::update_row(const byte * old_data, byte * new_data)
{
statistic_increment(ha_update_count,&LOCK_status);
- if (table->timestamp_on_update_now)
- update_timestamp(new_data+table->timestamp_on_update_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
return !mrg_update(file,old_data,new_data) ? 0 : my_errno ? my_errno : -1;
}
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index 95a294764d3..729ec4c27eb 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -251,8 +251,8 @@ int ha_myisam::write_row(byte * buf)
statistic_increment(ha_write_count,&LOCK_status);
/* If we have a timestamp column, update it to the current time */
- if (table->timestamp_default_now)
- update_timestamp(buf+table->timestamp_default_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
/*
If we have an auto_increment column and we are writing a changed row
@@ -1070,8 +1070,8 @@ bool ha_myisam::is_crashed() const
int ha_myisam::update_row(const byte * old_data, byte * new_data)
{
statistic_increment(ha_update_count,&LOCK_status);
- if (table->timestamp_on_update_now)
- update_timestamp(new_data+table->timestamp_on_update_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
return mi_update(file,old_data,new_data);
}
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index 9aa6d039efb..bf4c2a36ffd 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -82,8 +82,8 @@ int ha_myisammrg::close(void)
int ha_myisammrg::write_row(byte * buf)
{
statistic_increment(ha_write_count,&LOCK_status);
- if (table->timestamp_default_now)
- update_timestamp(buf+table->timestamp_default_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
if (table->next_number_field && buf == table->record[0])
update_auto_increment();
return myrg_write(file,buf);
@@ -92,8 +92,8 @@ int ha_myisammrg::write_row(byte * buf)
int ha_myisammrg::update_row(const byte * old_data, byte * new_data)
{
statistic_increment(ha_update_count,&LOCK_status);
- if (table->timestamp_on_update_now)
- update_timestamp(new_data+table->timestamp_on_update_now);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
return myrg_update(file,old_data,new_data);
}
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index e37ebf97828..d7a580dbb88 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -410,7 +410,7 @@ int ha_ndbcluster::set_ndb_key(NdbOperation *ndb_op, Field *field,
*/
int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
- uint fieldnr)
+ uint fieldnr, bool *set_blob_value)
{
const byte* field_ptr= field->ptr;
uint32 pack_len= field->pack_length();
@@ -455,6 +455,8 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
(unsigned)blob_ptr, blob_len));
DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
+ if (set_blob_value)
+ *set_blob_value= true;
// No callback needed to write value
DBUG_RETURN(ndb_blob->setValue(blob_ptr, blob_len) != 0);
}
@@ -1579,11 +1581,12 @@ int ha_ndbcluster::write_row(byte *record)
}
// Set non-key attribute(s)
+ bool set_blob_value= false;
for (i= 0; i < table->fields; i++)
{
Field *field= table->field[i];
if (!(field->flags & PRI_KEY_FLAG) &&
- set_ndb_value(op, field, i))
+ set_ndb_value(op, field, i, &set_blob_value))
{
skip_auto_increment= true;
ERR_RETURN(op->getNdbError());
@@ -1602,7 +1605,7 @@ int ha_ndbcluster::write_row(byte *record)
bulk_insert_not_flushed= true;
if ((rows_to_insert == 1) ||
((rows_inserted % bulk_insert_rows) == 0) ||
- uses_blob_value(false) != 0)
+ set_blob_value)
{
THD *thd= current_thd;
// Send rows to NDB
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 2b0108c7405..36452033516 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -207,7 +207,7 @@ class ha_ndbcluster: public handler
uint fieldnr, const byte* field_ptr);
int set_ndb_key(NdbOperation*, Field *field,
uint fieldnr, const byte* field_ptr);
- int set_ndb_value(NdbOperation*, Field *field, uint fieldnr);
+ int set_ndb_value(NdbOperation*, Field *field, uint fieldnr, bool *set_blob_value= 0);
int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, byte*);
friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg);
int get_ndb_blobs_value(NdbBlob *last_ndb_blob);
@@ -258,6 +258,7 @@ class ha_ndbcluster: public handler
uint32 blobs_buffer_size;
uint dupkey;
+ void set_rec_per_key();
void records_update();
void no_uncommitted_rows_execute_failure();
void no_uncommitted_rows_update(int);
diff --git a/sql/handler.cc b/sql/handler.cc
index c7e7b08ba6e..23b4fbe4835 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -942,22 +942,6 @@ int handler::read_first_row(byte * buf, uint primary_key)
}
-/* Set a timestamp in record */
-
-void handler::update_timestamp(byte *record)
-{
- long skr= (long) current_thd->query_start();
-#ifdef WORDS_BIGENDIAN
- if (table->db_low_byte_first)
- {
- int4store(record,skr);
- }
- else
-#endif
- longstore(record,skr);
- return;
-}
-
/*
Updates field with field_type NEXT_NUMBER according to following:
if field = 0 change field to the next free key in database.
diff --git a/sql/handler.h b/sql/handler.h
index 9806d2e7499..a7ce4e708fd 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -287,7 +287,6 @@ public:
{}
virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
int ha_open(const char *name, int mode, int test_if_locked);
- void update_timestamp(byte *record);
void update_auto_increment();
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
diff --git a/sql/item.cc b/sql/item.cc
index b0eb806cc7a..7b0dcc664c7 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1246,6 +1246,7 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
TABLE_LIST *table_list;
Item **refer= (Item **)not_found_item;
uint counter;
+ bool not_used;
// Prevent using outer fields in subselects, that is not supported now
SELECT_LEX *cursel= (SELECT_LEX *) thd->lex->current_select;
if (cursel->master_unit()->first_select()->linkage != DERIVED_TABLE_TYPE)
@@ -1288,7 +1289,8 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref)
}
if (sl->resolve_mode == SELECT_LEX::SELECT_MODE &&
(refer= find_item_in_list(this, sl->item_list, &counter,
- REPORT_EXCEPT_NOT_FOUND)) !=
+ REPORT_EXCEPT_NOT_FOUND,
+ &not_used)) !=
(Item **) not_found_item)
{
if (*refer && (*refer)->fixed) // Avoid crash in case of error
@@ -1889,6 +1891,7 @@ bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference)
{
DBUG_ASSERT(fixed == 0);
uint counter;
+ bool not_used;
if (!ref)
{
TABLE_LIST *where= 0, *table_list;
@@ -1908,13 +1911,13 @@ bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference)
first_select()->linkage !=
DERIVED_TABLE_TYPE) ?
REPORT_EXCEPT_NOT_FOUND :
- REPORT_ALL_ERRORS))) ==
+ REPORT_ALL_ERRORS ), &not_used)) ==
(Item **)not_found_item)
{
upward_lookup= 1;
Field *tmp= (Field*) not_found_field;
/*
- We can't find table field in table list of current select,
+ We can't find table field in select list of current select,
consequently we have to find it in outer subselect(s).
We can't join lists of outer & current select, because of scope
of view rules. For example if both tables (outer & current) have
@@ -1929,8 +1932,8 @@ bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference)
Item_subselect *prev_subselect_item= prev_unit->item;
if (sl->resolve_mode == SELECT_LEX::SELECT_MODE &&
(ref= find_item_in_list(this, sl->item_list,
- &counter,
- REPORT_EXCEPT_NOT_FOUND)) !=
+ &counter, REPORT_EXCEPT_NOT_FOUND,
+ &not_used)) !=
(Item **)not_found_item)
{
if (*ref && (*ref)->fixed) // Avoid crash in case of error
@@ -1989,8 +1992,7 @@ bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference)
// Call to report error
find_item_in_list(this,
*(thd->lex->current_select->get_item_list()),
- &counter,
- REPORT_ALL_ERRORS);
+ &counter, REPORT_ALL_ERRORS, &not_used);
}
ref= 0;
return 1;
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 8f09fe82c1b..863b041044e 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -989,7 +989,7 @@ longlong Item_func_unix_timestamp::val_int()
{ // Optimize timestamp field
Field *field=((Item_field*) args[0])->field;
if (field->type() == FIELD_TYPE_TIMESTAMP)
- return ((Field_timestamp*) field)->get_timestamp();
+ return ((Field_timestamp*) field)->get_timestamp(&null_value);
}
if (get_arg0_date(&ltime, 0))
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 6742c248d93..5d04d145563 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -636,6 +636,7 @@ int mysqld_show_keys(THD *thd, TABLE_LIST *table);
int mysqld_show_logs(THD *thd);
void append_identifier(THD *thd, String *packet, const char *name,
uint length);
+int get_quote_char_for_identifier(THD *thd, const char *name, uint length);
void mysqld_list_fields(THD *thd,TABLE_LIST *table, const char *wild);
int mysqld_dump_create_info(THD *thd, TABLE *table, int fd = -1);
int mysqld_show_create(THD *thd, TABLE_LIST *table_list);
@@ -705,7 +706,8 @@ enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND,
IGNORE_ERRORS};
extern const Item **not_found_item;
Item ** find_item_in_list(Item *item, List<Item> &items, uint *counter,
- find_item_error_report_type report_error);
+ find_item_error_report_type report_error,
+ bool *unaliased);
bool get_key_map_from_key_list(key_map *map, TABLE *table,
List<String> *index_list);
bool insert_fields(THD *thd,TABLE_LIST *tables,
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 30722d56b2a..5e40398574b 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -3998,6 +3998,7 @@ enum options_mysqld
OPT_INNODB_BUFFER_POOL_SIZE,
OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE,
+ OPT_INNODB_MAX_PURGE_LAG,
OPT_INNODB_FILE_IO_THREADS,
OPT_INNODB_LOCK_WAIT_TIMEOUT,
OPT_INNODB_THREAD_CONCURRENCY,
@@ -4233,6 +4234,11 @@ Disable with --skip-innodb (will save memory).",
{"innodb_max_dirty_pages_pct", OPT_INNODB_MAX_DIRTY_PAGES_PCT,
"Percentage of dirty pages allowed in bufferpool.", (gptr*) &srv_max_buf_pool_modified_pct,
(gptr*) &srv_max_buf_pool_modified_pct, 0, GET_ULONG, REQUIRED_ARG, 90, 0, 100, 0, 0, 0},
+ {"innodb_max_purge_lag", OPT_INNODB_MAX_PURGE_LAG,
+ "",
+ (gptr*) &srv_max_purge_lag,
+ (gptr*) &srv_max_purge_lag, 0, GET_LONG, REQUIRED_ARG, 0, 0, ~0L,
+ 0, 1L, 0},
{"innodb_status_file", OPT_INNODB_STATUS_FILE,
"Enable SHOW INNODB STATUS output in the innodb_status.<pid> file",
(gptr*) &innobase_create_status_file, (gptr*) &innobase_create_status_file,
@@ -4712,6 +4718,11 @@ replicating a LOAD DATA INFILE command.",
(gptr*) &innobase_additional_mem_pool_size,
(gptr*) &innobase_additional_mem_pool_size, 0, GET_LONG, REQUIRED_ARG,
1*1024*1024L, 512*1024L, ~0L, 0, 1024, 0},
+ {"innodb_autoextend_increment", OPT_INNODB_AUTOEXTEND_INCREMENT,
+ "Data file autoextend increment in megabytes",
+ (gptr*) &srv_auto_extend_increment,
+ (gptr*) &srv_auto_extend_increment,
+ 0, GET_LONG, REQUIRED_ARG, 8L, 1L, ~0L, 0, 1L, 0},
{"innodb_buffer_pool_awe_mem_mb", OPT_INNODB_BUFFER_POOL_AWE_MEM_MB,
"If Windows AWE is used, the size of InnoDB buffer pool allocated from the AWE memory.",
(gptr*) &innobase_buffer_pool_awe_mem_mb, (gptr*) &innobase_buffer_pool_awe_mem_mb, 0,
@@ -4753,11 +4764,6 @@ replicating a LOAD DATA INFILE command.",
"How many files at the maximum InnoDB keeps open at the same time.",
(gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0,
GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0},
- {"innodb_autoextend_increment", OPT_INNODB_AUTOEXTEND_INCREMENT,
- "Data file autoextend increment in megabytes",
- (gptr*) &innobase_auto_extend_increment,
- (gptr*) &innobase_auto_extend_increment,
- 0, GET_LONG, REQUIRED_ARG, 8L, 1L, ~0L, 0, 1L, 0},
#ifdef HAVE_REPLICATION
/*
Disabled for the 4.1.3 release. Disabling just this paragraph of code is
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 5351d2d12b2..d661470576b 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -351,6 +351,10 @@ sys_var_thd_ulong sys_net_wait_timeout("wait_timeout",
#ifdef HAVE_INNOBASE_DB
sys_var_long_ptr sys_innodb_max_dirty_pages_pct("innodb_max_dirty_pages_pct",
&srv_max_buf_pool_modified_pct);
+sys_var_long_ptr sys_innodb_autoextend_increment("innodb_autoextend_increment",
+ &srv_auto_extend_increment);
+sys_var_long_ptr sys_innodb_max_purge_lag("innodb_max_purge_lag",
+ &srv_max_purge_lag);
#endif
/* Time/date/datetime formats */
@@ -601,6 +605,8 @@ sys_var *sys_variables[]=
&sys_os,
#ifdef HAVE_INNOBASE_DB
&sys_innodb_max_dirty_pages_pct,
+ &sys_innodb_max_purge_lag,
+ &sys_innodb_autoextend_increment,
#endif
&sys_unique_checks,
&sys_warning_count
@@ -674,6 +680,7 @@ struct show_var_st init_vars[]= {
{"init_slave", (char*) &sys_init_slave, SHOW_SYS},
#ifdef HAVE_INNOBASE_DB
{"innodb_additional_mem_pool_size", (char*) &innobase_additional_mem_pool_size, SHOW_LONG },
+ {sys_innodb_autoextend_increment.name, (char*) &sys_innodb_autoextend_increment, SHOW_SYS},
{"innodb_buffer_pool_awe_mem_mb", (char*) &innobase_buffer_pool_awe_mem_mb, SHOW_LONG },
{"innodb_buffer_pool_size", (char*) &innobase_buffer_pool_size, SHOW_LONG },
{"innodb_data_file_path", (char*) &innobase_data_file_path, SHOW_CHAR_PTR},
@@ -693,6 +700,7 @@ struct show_var_st init_vars[]= {
{"innodb_log_files_in_group", (char*) &innobase_log_files_in_group, SHOW_LONG},
{"innodb_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR},
{sys_innodb_max_dirty_pages_pct.name, (char*) &sys_innodb_max_dirty_pages_pct, SHOW_SYS},
+ {sys_innodb_max_purge_lag.name, (char*) &sys_innodb_max_purge_lag, SHOW_SYS},
{"innodb_mirrored_log_groups", (char*) &innobase_mirrored_log_groups, SHOW_LONG},
{"innodb_open_files", (char*) &innobase_open_files, SHOW_LONG },
{"innodb_thread_concurrency", (char*) &innobase_thread_concurrency, SHOW_LONG },
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index fc68e26c21d..a5284a543e6 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -3622,7 +3622,7 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list)
}
/* Remove db access privileges */
- for (counter= 0 ; counter < acl_dbs.elements ; counter++)
+ for (counter= 0 ; counter < acl_dbs.elements ; )
{
const char *user,*host;
@@ -3637,11 +3637,14 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list)
{
if (replace_db_table(tables[1].table, acl_db->db, *lex_user, ~0, 1))
result= -1;
+ else
+ continue;
}
+ ++counter;
}
/* Remove column access */
- for (counter= 0 ; counter < column_priv_hash.records ; counter++)
+ for (counter= 0 ; counter < column_priv_hash.records ; )
{
const char *user,*host;
GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash,
@@ -3660,19 +3663,26 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list)
~0, 0, 1))
{
result= -1;
- continue;
}
- if (grant_table->cols)
+ else
{
- List<LEX_COLUMN> columns;
- if (replace_column_table(grant_table,tables[3].table, *lex_user,
- columns,
- grant_table->db,
- grant_table->tname,
- ~0, 1))
- result= -1;
+ if (grant_table->cols)
+ {
+ List<LEX_COLUMN> columns;
+ if (replace_column_table(grant_table,tables[3].table, *lex_user,
+ columns,
+ grant_table->db,
+ grant_table->tname,
+ ~0, 1))
+ result= -1;
+ else
+ continue;
+ }
+ else
+ continue;
}
}
+ ++counter;
}
}
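
The reworked loops above in mysql_revoke_all() advance the loop counter only when the current element was not removed, so an entry that shifts down into the current slot is not skipped. A minimal standalone sketch of that pattern over a plain std::vector (illustrative only, not the ACL code):

    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<int> grants = {1, 2, 2, 3, 2, 4};   // pretend 2 == "revoked user"

        for (size_t i = 0; i < grants.size(); /* no increment here */ )
        {
            if (grants[i] == 2)
            {
                grants.erase(grants.begin() + i);   // element removed, slot refilled
                continue;                           // re-examine the same index
            }
            ++i;                                    // only advance past kept entries
        }

        for (int g : grants)
            printf("%d ", g);                       // prints: 1 3 4
        printf("\n");
        return 0;
    }
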
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 32ef0081fa0..2770d93bf26 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -945,7 +945,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
table->keys_in_use_for_query= table->keys_in_use;
table->used_keys= table->keys_for_keyread;
if (table->timestamp_field)
- table->timestamp_field->set_timestamp_offsets();
+ table->timestamp_field_type= table->timestamp_field->get_auto_set_type();
DBUG_ASSERT(table->key_read == 0);
DBUG_RETURN(table);
}
@@ -2082,14 +2082,17 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables,
return not_found_item, report other errors,
return 0
IGNORE_ERRORS Do not report errors, return 0 if error
-
+ unaliased Set to true if the item is a field which was found
+ by its original field name and not by its alias
+ in the item list. Set to false otherwise.
+
RETURN VALUES
0 Item is not found or item is not unique,
error message is reported
not_found_item Function was called with
report_error == REPORT_EXCEPT_NOT_FOUND and
item was not found. No error message was reported
- found field
+ found field
*/
// Special Item pointer for find_item_in_list returning
@@ -2098,7 +2101,7 @@ const Item **not_found_item= (const Item**) 0x1;
Item **
find_item_in_list(Item *find, List<Item> &items, uint *counter,
- find_item_error_report_type report_error)
+ find_item_error_report_type report_error, bool *unaliased)
{
List_iterator<Item> li(items);
Item **found=0, **found_unaliased= 0, *item;
@@ -2107,6 +2110,9 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
const char *table_name=0;
bool found_unaliased_non_uniq= 0;
uint unaliased_counter;
+
+ *unaliased= FALSE;
+
if (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM)
{
field_name= ((Item_ident*) find)->field_name;
@@ -2134,17 +2140,18 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
/*
If table name is specified we should find field 'field_name' in
table 'table_name'. According to SQL-standard we should ignore
- aliases in this case. Note that we should prefer fields from the
- select list over other fields from the tables participating in
- this select in case of ambiguity.
+ aliases in this case.
+
+ Since we should NOT prefer fields from the select list over
+ other fields from the tables participating in this select in
+ case of ambiguity, we have to do an extra check outside this function.
We use strcmp for table names and database names as these may be
- case sensitive.
- In cases where they are not case sensitive, they are always in lower
- case.
+ case sensitive. In cases where they are not case sensitive, they
+ are always in lower case.
item_field->field_name and item_field->table_name can be 0x0 if
- item is not fix fielded yet.
+ item is not fix_fields()'ed yet.
*/
if (item_field->field_name && item_field->table_name &&
!my_strcasecmp(system_charset_info, item_field->field_name,
@@ -2153,17 +2160,22 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
(!db_name || (item_field->db_name &&
!strcmp(item_field->db_name, db_name))))
{
- if (found)
+ if (found_unaliased)
{
- if ((*found)->eq(item, 0))
- continue; // Same field twice
+ if ((*found_unaliased)->eq(item, 0))
+ continue;
+ /*
+ Two matching fields in the select list.
+ We can already bail out because we are searching through
+ unaliased names only and will get a duplicate error anyway.
+ */
if (report_error != IGNORE_ERRORS)
my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR),
MYF(0), find->full_name(), current_thd->where);
return (Item**) 0;
}
- found= li.ref();
- *counter= i;
+ found_unaliased= li.ref();
+ unaliased_counter= i;
if (db_name)
break; // Perfect match
}
@@ -2235,6 +2247,7 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter,
{
found= found_unaliased;
*counter= unaliased_counter;
+ *unaliased= TRUE;
}
}
if (found)
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 4cbd11c6a15..a0496a04bb2 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -45,8 +45,8 @@ static void unlink_blobs(register TABLE *table);
/*
Check if insert fields are correct.
- Sets table->timestamp_default_now/on_update_now to 0 o leaves it to point
- to timestamp field, depending on if timestamp should be updated or not.
+ Sets table->timestamp_field_type to TIMESTAMP_NO_AUTO_SET or leaves it
+ as is, depending on whether the timestamp should be updated or not.
*/
int
@@ -67,7 +67,7 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields,
check_grant_all_columns(thd,INSERT_ACL,table))
return -1;
#endif
- table->timestamp_default_now= table->timestamp_on_update_now= 0;
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
}
else
{ // Part field list
@@ -97,7 +97,7 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields,
}
if (table->timestamp_field && // Don't set timestamp if used
table->timestamp_field->query_id == thd->query_id)
- table->timestamp_default_now= table->timestamp_on_update_now= 0;
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
}
// For the values we need select_priv
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -569,7 +569,8 @@ int write_record(TABLE *table,COPY_INFO *info)
*/
if (last_uniq_key(table,key_nr) &&
!table->file->referenced_by_foreign_key() &&
- table->timestamp_default_now == table->timestamp_on_update_now)
+ (table->timestamp_field_type == TIMESTAMP_NO_AUTO_SET ||
+ table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH))
{
if ((error=table->file->update_row(table->record[1],
table->record[0])))
@@ -645,8 +646,7 @@ public:
bool query_start_used,last_insert_id_used,insert_id_used;
int log_query;
ulonglong last_insert_id;
- ulong timestamp_default_now;
- ulong timestamp_on_update_now;
+ timestamp_auto_set_type timestamp_field_type;
uint query_length;
delayed_row(enum_duplicates dup_arg, int log_query_arg)
@@ -940,7 +940,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd)
copy->timestamp_field=
(Field_timestamp*) copy->field[table->timestamp_field_offset];
copy->timestamp_field->unireg_check= table->timestamp_field->unireg_check;
- copy->timestamp_field->set_timestamp_offsets();
+ copy->timestamp_field_type= copy->timestamp_field->get_auto_set_type();
}
/* _rowid is not used with delayed insert */
@@ -995,8 +995,7 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic,
row->last_insert_id_used= thd->last_insert_id_used;
row->insert_id_used= thd->insert_id_used;
row->last_insert_id= thd->last_insert_id;
- row->timestamp_default_now= table->timestamp_default_now;
- row->timestamp_on_update_now= table->timestamp_on_update_now;
+ row->timestamp_field_type= table->timestamp_field_type;
di->rows.push_back(row);
di->stacked_inserts++;
@@ -1335,8 +1334,7 @@ bool delayed_insert::handle_inserts(void)
thd.last_insert_id=row->last_insert_id;
thd.last_insert_id_used=row->last_insert_id_used;
thd.insert_id_used=row->insert_id_used;
- table->timestamp_default_now= row->timestamp_default_now;
- table->timestamp_on_update_now= row->timestamp_on_update_now;
+ table->timestamp_field_type= row->timestamp_field_type;
info.handle_duplicates= row->dup;
if (info.handle_duplicates == DUP_IGNORE ||
@@ -1631,7 +1629,7 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
field=table->field+table->fields - values.elements;
/* Don't set timestamp if used */
- table->timestamp_default_now= table->timestamp_on_update_now= 0;
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
table->next_number_field=table->found_next_number_field;
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 78d89ef7aa9..17ab472c87b 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -264,7 +264,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
if (!(error=test(read_info.error)))
{
if (use_timestamp)
- table->timestamp_default_now= table->timestamp_on_update_now= 0;
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
table->next_number_field=table->found_next_number_field;
if (handle_duplicates == DUP_IGNORE ||
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 2c5ec34b867..e8441c05609 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -4142,7 +4142,12 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
}
else if (default_value->type() == Item::NULL_ITEM)
{
- default_value=0;
+ /*
+ The TIMESTAMP type should be able to distinguish a non-specified default
+ value from an explicit default value of NULL later.
+ */
+ if (type != FIELD_TYPE_TIMESTAMP)
+ default_value= 0;
if ((type_modifier & (NOT_NULL_FLAG | AUTO_INCREMENT_FLAG)) ==
NOT_NULL_FLAG)
{
@@ -4334,7 +4339,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
new_field->length=((new_field->length+1)/2)*2; /* purecov: inspected */
new_field->length= min(new_field->length,14); /* purecov: inspected */
}
- new_field->flags|= ZEROFILL_FLAG | UNSIGNED_FLAG | NOT_NULL_FLAG;
+ new_field->flags|= ZEROFILL_FLAG | UNSIGNED_FLAG;
if (default_value)
{
/* Grammar allows only NOW() value for ON UPDATE clause */
@@ -4352,6 +4357,9 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type,
else
new_field->unireg_check= (on_update_value?Field::TIMESTAMP_UN_FIELD:
Field::NONE);
+
+ if (default_value->type() == Item::NULL_ITEM)
+ new_field->def= 0;
}
else
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index e8e111a9a37..156c20edc0c 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -6923,7 +6923,10 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
reverse=flag; // Remember if reverse
key_part++;
}
- *used_key_parts= (uint) (key_part - table->key_info[idx].key_part);
+ uint tmp= (uint) (key_part - table->key_info[idx].key_part);
+ if (reverse == -1 && !(table->file->index_flags(idx,tmp-1, 1) & HA_READ_PREV))
+ DBUG_RETURN(0);
+ *used_key_parts= tmp;
DBUG_RETURN(reverse);
}
@@ -7120,10 +7123,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
*/
if (!select->quick->reverse_sorted())
{
- // here used_key_parts >0
- if (!(table->file->index_flags(ref_key,used_key_parts-1, 1)
- & HA_READ_PREV))
- DBUG_RETURN(0); // Use filesort
// ORDER BY range_key DESC
QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick,
used_key_parts);
@@ -7144,9 +7143,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
Use a traversal function that starts by reading the last row
with key part (A) and then traverse the index backwards.
*/
- if (!(table->file->index_flags(ref_key,used_key_parts-1, 1)
- & HA_READ_PREV))
- DBUG_RETURN(0); // Use filesort
tab->read_first_record= join_read_last_key;
tab->read_record.read_record= join_read_prev_same;
/* fall through */
@@ -7192,7 +7188,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
if (keys.is_set(nr))
{
int flag;
- if ((flag=test_if_order_by_key(order, table, nr, &not_used)))
+ if (flag=test_if_order_by_key(order, table, nr, &not_used))
{
if (!no_changes)
{
@@ -7946,15 +7942,14 @@ find_order_in_list(THD *thd, Item **ref_pointer_array,
TABLE_LIST *tables,ORDER *order, List<Item> &fields,
List<Item> &all_fields)
{
- Item *itemptr=*order->item;
- if (itemptr->type() == Item::INT_ITEM)
+ Item *it= *order->item;
+ if (it->type() == Item::INT_ITEM)
{ /* Order by position */
- uint count= (uint) itemptr->val_int();
+ uint count= (uint) it->val_int();
if (!count || count > fields.elements)
{
my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),
- MYF(0),itemptr->full_name(),
- thd->where);
+ MYF(0), it->full_name(), thd->where);
return 1;
}
order->item= ref_pointer_array + count-1;
@@ -7962,20 +7957,28 @@ find_order_in_list(THD *thd, Item **ref_pointer_array,
return 0;
}
uint counter;
- Item **item= find_item_in_list(itemptr, fields, &counter,
- REPORT_EXCEPT_NOT_FOUND);
+ bool unaliased;
+ Item **item= find_item_in_list(it, fields, &counter,
+ REPORT_EXCEPT_NOT_FOUND, &unaliased);
if (!item)
return 1;
if (item != (Item **)not_found_item)
{
+ /*
+ If we have found the field not by its alias in the select list but by its
+ original field name, we should additionally check if we have a conflict
+ for this name (as if we would perform the lookup in all tables).
+ */
+ if (unaliased && !it->fixed && it->fix_fields(thd, tables, order->item))
+ return 1;
+
order->item= ref_pointer_array + counter;
order->in_field_list=1;
return 0;
}
order->in_field_list=0;
- Item *it= *order->item;
/*
We check it->fixed because Item_func_group_concat can put
arguments for which fix_fields already was called.
@@ -8104,10 +8107,11 @@ setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields,
thd->set_query_id=1; // Not really needed, but...
uint counter;
+ bool not_used;
for (; new_field ; new_field= new_field->next)
{
if ((item= find_item_in_list(*new_field->item, fields, &counter,
- IGNORE_ERRORS)))
+ IGNORE_ERRORS, &not_used)))
new_field->item=item; /* Change to shared Item */
else
{
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index d084c051621..413de53f4bc 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -714,10 +714,11 @@ mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild,
protocol->store(field->has_charset() ? field->charset()->name : "NULL",
system_charset_info);
/*
- Altough TIMESTAMP fields can't contain NULL as its value they
+ Even if a TIMESTAMP field can't contain NULL as its value, it
will accept NULL if you will try to insert such value and will
- convert it to current TIMESTAMP. So YES here means that NULL
- is allowed for assignment but can't be returned.
+ convert the NULL value to the current TIMESTAMP. So YES here means
+ that NULL is allowed for assignment (but may not be
+ returned).
*/
pos=(byte*) ((flags & NOT_NULL_FLAG) &&
field->type() != FIELD_TYPE_TIMESTAMP ?
@@ -1147,40 +1148,18 @@ static const char *require_quotes(const char *name, uint name_length)
}
-static void append_quoted_simple_identifier(String *packet, char quote_char,
- const char *name, uint length)
-{
- packet->append(&quote_char, 1, system_charset_info);
- packet->append(name, length, system_charset_info);
- packet->append(&quote_char, 1, system_charset_info);
-}
-
-
void
append_identifier(THD *thd, String *packet, const char *name, uint length)
{
const char *name_end;
- char quote_char;
+ int q= get_quote_char_for_identifier(thd, name, length);
- if (thd->variables.sql_mode & MODE_ANSI_QUOTES)
- quote_char= '\"';
- else
- quote_char= '`';
-
- if (is_keyword(name,length))
- {
- append_quoted_simple_identifier(packet, quote_char, name, length);
+ if (q == EOF) {
+ packet->append(name, length, system_charset_info);
return;
}
- if (!require_quotes(name, length))
- {
- if (!(thd->options & OPTION_QUOTE_SHOW_CREATE))
- packet->append(name, length, system_charset_info);
- else
- append_quoted_simple_identifier(packet, quote_char, name, length);
- return;
- }
+ char quote_char= q;
/* The identifier must be quoted as it includes a quote character */
@@ -1199,6 +1178,22 @@ append_identifier(THD *thd, String *packet, const char *name, uint length)
}
+/* Get the quote character for displaying an identifier.
+ If no quote character is needed, return EOF. */
+
+int get_quote_char_for_identifier(THD *thd, const char *name, uint length)
+{
+ if (!is_keyword(name,length) &&
+ !require_quotes(name, length) &&
+ !(thd->options & OPTION_QUOTE_SHOW_CREATE))
+ return EOF;
+ else if (thd->variables.sql_mode & MODE_ANSI_QUOTES)
+ return '"';
+ else
+ return '`';
+}
+
+
/* Append directory name (if exists) to CREATE INFO */
static void append_directory(THD *thd, String *packet, const char *dir_type,
@@ -1295,7 +1290,14 @@ store_create_info(THD *thd, TABLE *table, String *packet)
if (flags & NOT_NULL_FLAG)
packet->append(" NOT NULL", 9);
-
+ else if (field->type() == FIELD_TYPE_TIMESTAMP)
+ {
+ /*
+ TIMESTAMP fields require an explicit NULL flag, because unlike
+ all other fields they are treated as NOT NULL by default.
+ */
+ packet->append(" NULL", 5);
+ }
/*
Again we are using CURRENT_TIMESTAMP instead of NOW because it is
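
The new get_quote_char_for_identifier() above centralizes the decision that append_identifier() used to make inline: return EOF when no quoting is needed, otherwise '"' under ANSI_QUOTES and '`' by default. A self-contained sketch of that decision order, with is_keyword()/require_quotes() and the thread flags replaced by hypothetical boolean parameters so it compiles on its own:

    #include <cstdio>

    static int quote_char_for_identifier(bool is_keyword, bool needs_quotes,
                                         bool quote_show_create, bool ansi_quotes)
    {
        // Quoting can be skipped only when all three reasons to quote are absent.
        if (!is_keyword && !needs_quotes && !quote_show_create)
            return EOF;
        // Otherwise pick the quote character according to the SQL mode.
        return ansi_quotes ? '"' : '`';
    }

    int main()
    {
        int q = quote_char_for_identifier(false, true, false, false);
        if (q == EOF)
            printf("name\n");           // caller appends the identifier unquoted
        else
            printf("%cname%c\n", q, q); // caller wraps it, e.g. `name`
        return 0;
    }
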
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index c3bd4771fbc..3d5aaf0c2ec 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -3054,12 +3054,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
- /*
- We don't want update TIMESTAMP fields during ALTER TABLE
- and copy_data_between_tables uses only write_row() for new_table so
- don't need to set up timestamp_on_update_now member.
- */
- new_table->timestamp_default_now= 0;
+ /* We don't want to update TIMESTAMP fields during ALTER TABLE. */
+ new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
new_table->next_number_field=new_table->found_next_number_field;
thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields
thd->cuted_fields=0L;
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index b6cd0d967e9..c6fb3d6e415 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -116,7 +116,7 @@ int mysql_update(THD *thd,
{
// Don't set timestamp column if this is modified
if (table->timestamp_field->query_id == thd->query_id)
- table->timestamp_on_update_now= 0;
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
else
table->timestamp_field->query_id=timestamp_query_id;
}
@@ -526,7 +526,7 @@ int mysql_multi_update(THD *thd,
// Only set timestamp column if this is not modified
if (table->timestamp_field &&
table->timestamp_field->query_id == thd->query_id)
- table->timestamp_on_update_now= 0;
+ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
/* if table will be updated then check that it is unique */
if (table->map & item_tables)
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 5bf5140d0d8..0c81c172cf7 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1415,10 +1415,21 @@ type:
if (YYTHD->variables.sql_mode & MODE_MAXDB)
$$=FIELD_TYPE_DATETIME;
else
+ {
+ /*
+ Unlike other types, TIMESTAMP fields are NOT NULL by default.
+ */
+ Lex->type|= NOT_NULL_FLAG;
$$=FIELD_TYPE_TIMESTAMP;
+ }
}
- | TIMESTAMP '(' NUM ')' { Lex->length=$3.str;
- $$=FIELD_TYPE_TIMESTAMP; }
+ | TIMESTAMP '(' NUM ')'
+ {
+ LEX *lex= Lex;
+ lex->length= $3.str;
+ lex->type|= NOT_NULL_FLAG;
+ $$= FIELD_TYPE_TIMESTAMP;
+ }
| DATETIME { $$=FIELD_TYPE_DATETIME; }
| TINYBLOB { Lex->charset=&my_charset_bin;
$$=FIELD_TYPE_TINY_BLOB; }
diff --git a/sql/table.h b/sql/table.h
index f111377bc85..904038ad029 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -57,6 +57,16 @@ typedef struct st_filesort_info
} FILESORT_INFO;
+/*
+ Values in this enum are used to indicate during which operations the value
+ of a TIMESTAMP field should be set to the current timestamp.
+*/
+enum timestamp_auto_set_type
+{
+ TIMESTAMP_NO_AUTO_SET= 0, TIMESTAMP_AUTO_SET_ON_INSERT= 1,
+ TIMESTAMP_AUTO_SET_ON_UPDATE= 2, TIMESTAMP_AUTO_SET_ON_BOTH= 3
+};
+
/* Table cache entry struct */
class Field_timestamp;
@@ -99,16 +109,19 @@ struct st_table {
uint status; /* Used by postfix.. */
uint system; /* Set if system record */
- /*
- These two members hold offset in record + 1 for TIMESTAMP field
- with NOW() as default value or/and with ON UPDATE NOW() option.
- If 0 then such field is absent in this table or auto-set for default
- or/and on update should be temporaly disabled for some reason.
- These values is setup to offset value for each statement in open_table()
- and turned off in statement processing code (see mysql_update as example).
+ /*
+ If this table has a TIMESTAMP field with the auto-set property (pointed
+ to by the timestamp_field member) then this variable indicates during which
+ operations (insert only / on update / in both cases) we should set this
+ field to the current timestamp. If there is no such field in this table or
+ we should not automatically set its value during execution of the current
+ statement then the variable contains TIMESTAMP_NO_AUTO_SET (i.e. 0).
+
+ The value of this variable is set for each statement in open_table() and
+ if needed cleared later in the statement processing code (see
+ mysql_update() as an example).
*/
- ulong timestamp_default_now;
- ulong timestamp_on_update_now;
+ timestamp_auto_set_type timestamp_field_type;
/* Index of auto-updated TIMESTAMP field in field array */
uint timestamp_field_offset;
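
A minimal sketch, reusing the enum just added to sql/table.h, of why the values 0..3 work both ways: TIMESTAMP_AUTO_SET_ON_BOTH is the bitwise OR of the ON_INSERT and ON_UPDATE values, so the handler hunks can test a single bit with '&', while write_record() in sql_insert.cc can express "auto-set behaves the same on INSERT and UPDATE" as a comparison against NO_AUTO_SET or ON_BOTH.

    #include <cassert>

    enum timestamp_auto_set_type
    {
      TIMESTAMP_NO_AUTO_SET= 0, TIMESTAMP_AUTO_SET_ON_INSERT= 1,
      TIMESTAMP_AUTO_SET_ON_UPDATE= 2, TIMESTAMP_AUTO_SET_ON_BOTH= 3
    };

    int main()
    {
      // ON_BOTH carries both the ON_INSERT and the ON_UPDATE bit.
      assert((TIMESTAMP_AUTO_SET_ON_INSERT | TIMESTAMP_AUTO_SET_ON_UPDATE) ==
             TIMESTAMP_AUTO_SET_ON_BOTH);

      for (int t= TIMESTAMP_NO_AUTO_SET; t <= TIMESTAMP_AUTO_SET_ON_BOTH; t++)
      {
        // What the write_row()/update_row() hunks test with '&':
        bool on_insert= (t & TIMESTAMP_AUTO_SET_ON_INSERT) != 0;
        bool on_update= (t & TIMESTAMP_AUTO_SET_ON_UPDATE) != 0;
        // The write_record() shortcut is allowed exactly when both bits agree.
        bool same= (t == TIMESTAMP_NO_AUTO_SET || t == TIMESTAMP_AUTO_SET_ON_BOTH);
        assert(same == (on_insert == on_update));
      }
      return 0;
    }
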