author | Jacob Mathew <jacob.mathew@mariadb.com> | 2018-07-09 14:25:37 -0700
---|---|---
committer | Jacob Mathew <jacob.mathew@mariadb.com> | 2018-07-09 14:25:37 -0700
commit | 97cc9d34e3e27f8354e55c43d19d8f313b7ae6fd (patch) |
tree | ec2aa5e9fae5c1662110d81195dbeefd250a901e /sql |
parent | b27ec709350e13c5cdc08dcdcaeb31b1cc0f803e (diff) |
download | mariadb-git-97cc9d34e3e27f8354e55c43d19d8f313b7ae6fd.tar.gz |
MDEV-16246: insert timestamp into spider table from mysqldump gets wrong time zone (bb-10.3-MDEV-16246)
The problem occurred because the Spider node was incorrectly handling
timestamp values sent to and received from the data nodes.
The problem has been corrected as follows:
- Added logic to set and maintain the UTC time zone on the data nodes.
To prevent timestamp ambiguity, the data nodes must use a time zone,
such as UTC, that does not observe daylight saving time (see the first
sketch after this list).
- Removed the spider_sync_time_zone configuration variable, which did not
solve the problem and which interfered with the solution.
- Added logic to convert to the UTC time zone all timestamp values sent to
and received from the data nodes. This is done for both unique and
non-unique timestamp columns, both in WHERE clauses (of SELECT, UPDATE
and DELETE statements) and in the columns assigned by UPDATE statements
(see the second sketch after this list).
- Disabled Spider's use of direct update when any of the columns to update is
a timestamp column. This is necessary to prevent false duplicate key value
errors.
- Added a new test, spider.timestamp, to thoroughly test Spider's handling
of timestamp values (a sketch of such a test follows this list).
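
The first two items amount to pinning every data-node connection to a
fixed-offset zone. The sketch below is illustrative only: the statements
Spider actually sends live in the storage-engine code, which this
sql/-only diffstat does not show, CONVERT_TZ() with named zones assumes
the mysql time zone tables are loaded, and US/Central is an arbitrary
example of a DST-observing zone.

-- Why a DST-observing zone is ambiguous: during the 2018 "fall back"
-- hour, two distinct UTC instants map to the same local wall-clock time.
SELECT CONVERT_TZ('2018-11-04 06:30:00', '+00:00', 'US/Central');
-- -> 2018-11-04 01:30:00  (CDT, UTC-5)
SELECT CONVERT_TZ('2018-11-04 07:30:00', '+00:00', 'US/Central');
-- -> 2018-11-04 01:30:00  (CST, UTC-6; same string, different instant)

-- A fixed-offset zone has no such fold; conceptually, the fix keeps each
-- data-node session in UTC with something like:
SET SESSION time_zone = '+00:00';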
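
For the WHERE-clause conversion, the table name spd_tbl and the session
offset below are hypothetical; the point is only that a literal typed in
the Spider node's session zone has to reach the UTC-pinned data node as
its UTC equivalent.

-- On the Spider node, in a local session zone:
SET time_zone = '-07:00';
SELECT * FROM spd_tbl WHERE ts = '2018-07-09 14:25:37';

-- What the UTC-pinned data node must effectively receive:
SELECT * FROM spd_tbl WHERE ts = '2018-07-09 21:25:37';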
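
The spider.timestamp test itself is also outside this sql/-only diffstat;
the fragment below is only a guess at its shape, with hypothetical table
and column names and the Spider connection details elided.

SET time_zone = '+09:00';
CREATE TABLE ts_test (
  id INT PRIMARY KEY,
  ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
) ENGINE=Spider COMMENT='...';  -- connection info elided

INSERT INTO ts_test (id, ts) VALUES (1, '2018-07-09 14:25:37');
-- Round-trip the value through WHERE on SELECT, UPDATE and DELETE:
SELECT ts FROM ts_test WHERE ts = '2018-07-09 14:25:37';
UPDATE ts_test SET ts = '2018-07-10 00:00:00' WHERE id = 1;
DELETE FROM ts_test WHERE ts = '2018-07-10 00:00:00';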
Author:
Jacob Mathew.
Reviewer:
Kentoku Shiba.
Diffstat (limited to 'sql')
-rw-r--r-- | sql/ha_partition.cc | 12
-rw-r--r-- | sql/ha_partition.h | 4
-rw-r--r-- | sql/handler.h | 4
-rw-r--r-- | sql/sql_update.cc | 5
4 files changed, 15 insertions, 10 deletions
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 0775d67a592..20873c01771 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -11142,13 +11142,14 @@ int ha_partition::end_bulk_delete()
 
   SYNOPSIS
     direct_update_rows_init()
+    update fields                Pointer to the list of fields to update
 
   RETURN VALUE
     >0                    Error
     0                     Success
 */
 
-int ha_partition::direct_update_rows_init()
+int ha_partition::direct_update_rows_init(List<Item> *update_fields)
 {
   int error;
   uint i, found;
@@ -11174,8 +11175,8 @@ int ha_partition::direct_update_rows_init()
     {
       file= m_file[i];
       if (unlikely((error= (m_pre_calling ?
-                            file->pre_direct_update_rows_init() :
-                            file->direct_update_rows_init()))))
+                            file->pre_direct_update_rows_init(update_fields) :
+                            file->direct_update_rows_init(update_fields)))))
       {
         DBUG_PRINT("info", ("partition FALSE by storage engine"));
         DBUG_RETURN(error);
@@ -11213,20 +11214,21 @@ int ha_partition::direct_update_rows_init()
 
   SYNOPSIS
     pre_direct_update_rows_init()
+    update fields                Pointer to the list of fields to update
 
   RETURN VALUE
     >0                    Error
     0                     Success
 */
 
-int ha_partition::pre_direct_update_rows_init()
+int ha_partition::pre_direct_update_rows_init(List<Item> *update_fields)
 {
   bool save_m_pre_calling;
   int error;
   DBUG_ENTER("ha_partition::pre_direct_update_rows_init");
   save_m_pre_calling= m_pre_calling;
   m_pre_calling= TRUE;
-  error= direct_update_rows_init();
+  error= direct_update_rows_init(update_fields);
   m_pre_calling= save_m_pre_calling;
   DBUG_RETURN(error);
 }
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index e661d0badd3..8a251016703 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -620,8 +620,8 @@ public:
   virtual int bulk_update_row(const uchar *old_data, const uchar *new_data,
                               ha_rows *dup_key_found);
   virtual int update_row(const uchar * old_data, const uchar * new_data);
-  virtual int direct_update_rows_init();
-  virtual int pre_direct_update_rows_init();
+  virtual int direct_update_rows_init(List<Item> *update_fields);
+  virtual int pre_direct_update_rows_init(List<Item> *update_fields);
   virtual int direct_update_rows(ha_rows *update_rows);
   virtual int pre_direct_update_rows();
   virtual bool start_bulk_delete();
diff --git a/sql/handler.h b/sql/handler.h
index 3d1b764bd14..6828173282d 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -4399,12 +4399,12 @@ private:
   /* Perform initialization for a direct update request */
 public:
   int ha_direct_update_rows(ha_rows *update_rows);
-  virtual int direct_update_rows_init()
+  virtual int direct_update_rows_init(List<Item> *update_fields)
   {
     return HA_ERR_WRONG_COMMAND;
   }
 private:
-  virtual int pre_direct_update_rows_init()
+  virtual int pre_direct_update_rows_init(List<Item> *update_fields)
   {
     return HA_ERR_WRONG_COMMAND;
   }
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index c0fa2b4d7fe..cb7bcdc33a1 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -615,6 +615,9 @@ int mysql_update(THD *thd,
       - Note that Spider can handle ORDER BY and LIMIT in a cluster
         with one data node.  These conditions are therefore checked in
         direct_update_rows_init().
+      - Update fields include a unique timestamp field
+        - The storage engine may not be able to avoid false duplicate key
+          errors.  This condition is checked in direct_update_rows_init().
 
       Direct update does not require a WHERE clause
@@ -637,7 +640,7 @@ int mysql_update(THD *thd,
     if (!table->file->info_push(INFO_KIND_UPDATE_FIELDS, &fields) &&
         !table->file->info_push(INFO_KIND_UPDATE_VALUES, &values) &&
-        !table->file->direct_update_rows_init())
+        !table->file->direct_update_rows_init(&fields))
     {
       do_direct_update= TRUE;
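
To make the new sql_update.cc condition concrete, here is a sketch with
hypothetical names: because direct_update_rows_init() now receives the
update field list (&fields), an engine such as Spider can detect that the
SET clause touches a unique timestamp column and decline direct update,
falling back to ordinary row-by-row update instead of risking a false
duplicate-key error.

-- Hypothetical schema: unique index on a timestamp column.
CREATE TABLE spd_tbl (
  id INT PRIMARY KEY,
  ts TIMESTAMP NOT NULL,
  UNIQUE KEY (ts)
) ENGINE=Spider COMMENT='...';  -- connection info elided

-- After this patch the handler sees the update field list and can
-- refuse direct update for this statement:
UPDATE spd_tbl SET ts = '2018-07-09 14:25:37' WHERE id = 1;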