author    Vicențiu Ciorbaru <vicentiu@mariadb.org>  2017-12-28 19:27:00 +0200
committer Vicențiu Ciorbaru <vicentiu@mariadb.org>  2017-12-28 19:27:00 +0200
commit    9aeb5d01d6bb1506febfcab6ef9d78417efa87df
tree      96602fb6d88d8a14a69d27aa241fe0e59e2ebc3e /sql
parent    14de2ad3cb4e1f8f48b83d5e9aafa4e3a366d152
parent    7e4c185c774cabaa1912760e143d9385ce959eea
Merge remote-tracking branch 'origin/10.1' into bb-10.2-vicentiu
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_partition.cc  | 69
-rw-r--r--  sql/log.cc           | 88
-rw-r--r--  sql/log.h            |  8
-rw-r--r--  sql/sql_acl.cc       | 26
-rw-r--r--  sql/wsrep_mysqld.cc  | 10
5 files changed, 147 insertions(+), 54 deletions(-)
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index cc5521114f4..56e1f649286 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2184,38 +2184,19 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
DBUG_ASSERT(sub_elem);
part= i * num_subparts + j;
DBUG_ASSERT(part < m_file_tot_parts && m_file[part]);
- if (ha_legacy_type(m_file[part]->ht) == DB_TYPE_INNODB)
- {
- dummy_info.data_file_name= dummy_info.index_file_name = NULL;
- m_file[part]->update_create_info(&dummy_info);
-
- if (dummy_info.data_file_name || sub_elem->data_file_name)
- {
- sub_elem->data_file_name = (char*) dummy_info.data_file_name;
- }
- if (dummy_info.index_file_name || sub_elem->index_file_name)
- {
- sub_elem->index_file_name = (char*) dummy_info.index_file_name;
- }
- }
+ dummy_info.data_file_name= dummy_info.index_file_name = NULL;
+ m_file[part]->update_create_info(&dummy_info);
+ sub_elem->data_file_name = (char*) dummy_info.data_file_name;
+ sub_elem->index_file_name = (char*) dummy_info.index_file_name;
}
}
else
{
DBUG_ASSERT(m_file[i]);
- if (ha_legacy_type(m_file[i]->ht) == DB_TYPE_INNODB)
- {
- dummy_info.data_file_name= dummy_info.index_file_name= NULL;
- m_file[i]->update_create_info(&dummy_info);
- if (dummy_info.data_file_name || part_elem->data_file_name)
- {
- part_elem->data_file_name = (char*) dummy_info.data_file_name;
- }
- if (dummy_info.index_file_name || part_elem->index_file_name)
- {
- part_elem->index_file_name = (char*) dummy_info.index_file_name;
- }
- }
+ dummy_info.data_file_name= dummy_info.index_file_name= NULL;
+ m_file[i]->update_create_info(&dummy_info);
+ part_elem->data_file_name = (char*) dummy_info.data_file_name;
+ part_elem->index_file_name = (char*) dummy_info.index_file_name;
}
}
DBUG_VOID_RETURN;
@@ -8160,20 +8141,36 @@ uint ha_partition::alter_table_flags(uint flags)
bool ha_partition::check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes)
{
- handler **file;
- bool ret= COMPATIBLE_DATA_YES;
-
/*
The check for any partitioning related changes have already been done
in mysql_alter_table (by fix_partition_func), so it is only up to
the underlying handlers.
*/
- for (file= m_file; *file; file++)
- if ((ret= (*file)->check_if_incompatible_data(create_info,
- table_changes)) !=
- COMPATIBLE_DATA_YES)
- break;
- return ret;
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ HA_CREATE_INFO dummy_info= *create_info;
+ uint i=0;
+ while (partition_element *part_elem= part_it++)
+ {
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> subpart_it(part_elem->subpartitions);
+ while (partition_element *sub_elem= subpart_it++)
+ {
+ dummy_info.data_file_name= sub_elem->data_file_name;
+ dummy_info.index_file_name= sub_elem->index_file_name;
+ if (m_file[i++]->check_if_incompatible_data(&dummy_info, table_changes))
+ return COMPATIBLE_DATA_NO;
+ }
+ }
+ else
+ {
+ dummy_info.data_file_name= part_elem->data_file_name;
+ dummy_info.index_file_name= part_elem->index_file_name;
+ if (m_file[i++]->check_if_incompatible_data(&dummy_info, table_changes))
+ return COMPATIBLE_DATA_NO;
+ }
+ }
+ return COMPATIBLE_DATA_YES;
}
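
The reworked ha_partition::check_if_incompatible_data() walks the partition list instead of the flat handler array, so each underlying handler is compared against its own per-partition DATA/INDEX DIRECTORY rather than the table-level values. Below is a minimal standalone sketch of that iteration pattern, using hypothetical stand-in types rather than the server's handler and partition_element classes:

// Sketch only: one running handler index is advanced per leaf partition while
// the per-partition file names are copied into a scratch copy of the
// table-level create info before asking each handler.
#include <string>
#include <vector>

struct CreateInfo {
  std::string data_file_name;
  std::string index_file_name;
};

struct Handler {
  std::string data_dir;
  // Illustrative check only; real handlers compare many more attributes.
  bool check_if_incompatible_data(const CreateInfo &info) const {
    return info.data_file_name != data_dir;
  }
};

struct SubElem  { std::string data_file_name, index_file_name; };
struct PartElem {
  std::string data_file_name, index_file_name;
  std::vector<SubElem> subpartitions;          // empty when not subpartitioned
};

bool is_incompatible(const std::vector<PartElem> &parts,
                     const std::vector<Handler> &handlers,
                     const CreateInfo &table_info)
{
  CreateInfo dummy_info = table_info;          // scratch copy per comparison
  std::size_t i = 0;
  for (const PartElem &part : parts) {
    if (!part.subpartitions.empty()) {
      for (const SubElem &sub : part.subpartitions) {
        dummy_info.data_file_name  = sub.data_file_name;
        dummy_info.index_file_name = sub.index_file_name;
        if (handlers[i++].check_if_incompatible_data(dummy_info))
          return true;                         // COMPATIBLE_DATA_NO
      }
    } else {
      dummy_info.data_file_name  = part.data_file_name;
      dummy_info.index_file_name = part.index_file_name;
      if (handlers[i++].check_if_incompatible_data(dummy_info))
        return true;
    }
  }
  return false;                                // COMPATIBLE_DATA_YES
}
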
diff --git a/sql/log.cc b/sql/log.cc
index 3aff3b2c40d..a905e2b8caf 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -4946,7 +4946,55 @@ MYSQL_BIN_LOG::is_xidlist_idle_nolock()
return true;
}
+#ifdef WITH_WSREP
+inline bool
+is_gtid_cached_internal(IO_CACHE *file)
+{
+ uchar data[EVENT_TYPE_OFFSET+1];
+ bool result= false;
+ my_off_t write_pos= my_b_tell(file);
+ if (reinit_io_cache(file, READ_CACHE, 0, 0, 0))
+ return false;
+ /*
+    The cache starts with a GTID event if the condition below is true.
+ */
+ my_b_read(file, data, sizeof(data));
+ uint event_type= (uchar)data[EVENT_TYPE_OFFSET];
+ if (event_type == GTID_LOG_EVENT)
+ result= true;
+ /*
+    Cleanup: we have not read the full buffer, so mark it as fully read;
+    otherwise the next reinit_io_cache() (called from write_cache) would
+    empty the cache.
+ */
+ file->read_pos= file->read_end;
+ if (reinit_io_cache(file, WRITE_CACHE, write_pos, 0, 0))
+ return false;
+ return result;
+}
+#endif
+#ifdef WITH_WSREP
+inline bool
+MYSQL_BIN_LOG::is_gtid_cached(THD *thd)
+{
+ binlog_cache_mngr *mngr= (binlog_cache_mngr *) thd_get_ha_data(
+ thd, binlog_hton);
+ if (!mngr)
+ return false;
+ binlog_cache_data *cache_trans= mngr->get_binlog_cache_data(
+ use_trans_cache(thd, true));
+ binlog_cache_data *cache_stmt= mngr->get_binlog_cache_data(
+ use_trans_cache(thd, false));
+ if (cache_trans && !cache_trans->empty() &&
+ is_gtid_cached_internal(&cache_trans->cache_log))
+ return true;
+ if (cache_stmt && !cache_stmt->empty() &&
+ is_gtid_cached_internal(&cache_stmt->cache_log))
+ return true;
+ return false;
+}
+#endif
/**
Create a new log file name.
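
is_gtid_cached_internal() switches the binlog cache to READ_CACHE, peeks at the event-type byte of the first buffered event, marks the buffer as consumed, and restores WRITE_CACHE mode at the saved write position. The detection itself boils down to a one-byte peek; here is a standalone sketch over a plain byte buffer, with placeholder constants standing in for the server's EVENT_TYPE_OFFSET and GTID_LOG_EVENT values:

// Sketch only; the real offsets and type codes come from the server headers.
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t  kEventTypeOffset = 4;    // assumed type-byte position
constexpr std::uint8_t kGtidLogEvent    = 0xA2; // placeholder type code

// True when the first event buffered in the cache is a GTID event.
bool first_event_is_gtid(const std::vector<std::uint8_t> &cache)
{
  if (cache.size() <= kEventTypeOffset)  // need the header up to the type byte
    return false;
  return cache[kEventTypeOffset] == kGtidLogEvent;
}
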
@@ -5541,7 +5589,37 @@ THD::binlog_start_trans_and_stmt()
cache_mngr->trx_cache.get_prev_position() == MY_OFF_T_UNDEF)
{
this->binlog_set_stmt_begin();
- if (in_multi_stmt_transaction_mode())
+ bool mstmt_mode= in_multi_stmt_transaction_mode();
+#ifdef WITH_WSREP
+  /*
+    Write the GTID event.
+    The domain id is taken only when gtid mode is set.
+    If this event was replicated through a master, we forward the same GTID
+    to the other nodes.
+    This must be done only once per transaction; since this function is
+    called multiple times, we check ha_info->is_started().
+  */
+ Ha_trx_info *ha_info;
+ ha_info= this->ha_data[binlog_hton->slot].ha_info + (mstmt_mode ? 1 : 0);
+
+ if (!ha_info->is_started() && wsrep_gtid_mode
+ && this->variables.gtid_seq_no)
+ {
+ binlog_cache_mngr *const cache_mngr=
+ (binlog_cache_mngr*) thd_get_ha_data(this, binlog_hton);
+
+ IO_CACHE *file=
+ cache_mngr->get_binlog_cache_log(use_trans_cache(this, true));
+ Log_event_writer writer(file);
+ Gtid_log_event gtid_event(this, this->variables.gtid_seq_no,
+ this->variables.gtid_domain_id,
+ true, LOG_EVENT_SUPPRESS_USE_F,
+ true, 0);
+ gtid_event.server_id= this->variables.server_id;
+ writer.write(&gtid_event);
+ }
+#endif
+ if (mstmt_mode)
trans_register_ha(this, TRUE, binlog_hton);
trans_register_ha(this, FALSE, binlog_hton);
/*
@@ -5823,7 +5901,7 @@ MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone,
DBUG_PRINT("enter", ("standalone: %d", standalone));
#ifdef WITH_WSREP
- if (WSREP(thd) && thd->wsrep_trx_meta.gtid.seqno != -1 && wsrep_gtid_mode)
+ if (WSREP(thd) && thd->wsrep_trx_meta.gtid.seqno != -1 && wsrep_gtid_mode && !thd->variables.gtid_seq_no)
{
domain_id= wsrep_gtid_domain_id;
} else {
@@ -5875,6 +5953,12 @@ MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone,
/* Write the event to the binary log. */
DBUG_ASSERT(this == &mysql_bin_log);
+
+#ifdef WITH_WSREP
+ if (wsrep_gtid_mode && is_gtid_cached(thd))
+ DBUG_RETURN(false);
+#endif
+
if (write_event(&gtid_event))
DBUG_RETURN(true);
status_var_add(thd->status_var.binlog_bytes_written, gtid_event.data_written);
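
Taken together, the log.cc changes write the forwarded GTID event into the binlog cache up front in binlog_start_trans_and_stmt() (only when wsrep_gtid_mode is on and the session carries an explicit gtid_seq_no), and write_gtid_event() later skips generating a second GTID once is_gtid_cached() reports one is already buffered. A sketch of that write-once guard, using hypothetical stand-ins for THD and the binlog cache manager:

// Sketch only: the flag below stands in for is_gtid_cached(thd).
#include <cstdint>

struct Session {
  bool          wsrep_gtid_mode     = false;
  std::uint64_t gtid_seq_no         = 0;  // non-zero when forwarded from a master
  bool          gtid_already_cached = false;
};

// Called when the statement/transaction registers with the binlog handlerton.
void start_trans_and_stmt(Session &s)
{
  if (s.wsrep_gtid_mode && s.gtid_seq_no && !s.gtid_already_cached) {
    // Build a GTID event from the session's seq_no/domain_id/server_id
    // and write it into the binlog cache exactly once.
    s.gtid_already_cached = true;
  }
}

// Called at commit time by the group-commit code.
bool write_gtid_event(Session &s)
{
  if (s.wsrep_gtid_mode && s.gtid_already_cached)
    return false;                         // GTID already in the cache, skip
  // ... otherwise generate a fresh GTID and write it to the binlog ...
  return false;
}
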
diff --git a/sql/log.h b/sql/log.h
index 97e257829b7..5589cbf168b 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -560,7 +560,13 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
bool write_transaction_to_binlog_events(group_commit_entry *entry);
void trx_group_commit_leader(group_commit_entry *leader);
bool is_xidlist_idle_nolock();
-
+#ifdef WITH_WSREP
+ /*
+    When this MariaDB node is a slave and Galera is enabled, the GTID
+    is written in wsrep_run_commit itself.
+ */
+ inline bool is_gtid_cached(THD *thd);
+#endif
public:
/*
A list of struct xid_count_per_binlog is used to keep track of how many
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 9c7eb8af736..34c28e95e89 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -6301,6 +6301,8 @@ static int merge_role_privileges(ACL_ROLE *role __attribute__((unused)),
if (--grantee->counter)
return 1; // don't recurse into grantee just yet
+ grantee->counter= 1; // Mark the grantee as merged.
+
/* if we'll do db/table/routine privileges, create a hash of role names */
role_hash_t role_hash(role_key);
if (data->what != PRIVS_TO_MERGE::GLOBAL)
@@ -7399,14 +7401,16 @@ end_index_init:
DBUG_RETURN(return_val);
}
-static my_bool collect_leaf_roles(void *role_ptr,
- void *roles_array)
+static my_bool propagate_role_grants_action(void *role_ptr,
+ void *ptr __attribute__((unused)))
{
ACL_ROLE *role= static_cast<ACL_ROLE *>(role_ptr);
- Dynamic_array<ACL_ROLE *> *array=
- static_cast<Dynamic_array<ACL_ROLE *> *>(roles_array);
- if (!role->counter)
- array->push(role);
+ if (role->counter)
+ return 0;
+
+ mysql_mutex_assert_owner(&acl_cache->lock);
+ PRIVS_TO_MERGE data= { PRIVS_TO_MERGE::ALL, 0, 0 };
+ traverse_role_graph_up(role, &data, NULL, merge_role_privileges);
return 0;
}
@@ -7476,15 +7480,7 @@ bool grant_reload(THD *thd)
}
mysql_mutex_lock(&acl_cache->lock);
- Dynamic_array<ACL_ROLE *> leaf_roles;
- my_hash_iterate(&acl_roles, collect_leaf_roles, &leaf_roles);
- PRIVS_TO_MERGE data= { PRIVS_TO_MERGE::ALL, 0, 0 };
- for (size_t i= 0; i < leaf_roles.elements(); i++)
- {
- traverse_role_graph_up(leaf_roles.at(i), &data, NULL,
- merge_role_privileges);
- }
-
+ my_hash_iterate(&acl_roles, propagate_role_grants_action, NULL);
mysql_mutex_unlock(&acl_cache->lock);
mysql_rwlock_unlock(&LOCK_grant);
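
In sql_acl.cc, merge_role_privileges() now marks the grantee's counter after merging so a role is never merged twice, and grant_reload() no longer collects leaf roles into a Dynamic_array: it iterates the whole role hash and starts an upward traversal only from roles that no earlier traversal has reached. A toy sketch of the new entry point, with stand-in types for ACL_ROLE, my_hash_iterate() and traverse_role_graph_up():

// Sketch only: the traversal itself is left abstract.
#include <functional>
#include <vector>

struct Role {
  unsigned counter = 0;   // non-zero once a traversal has already reached it
};

void propagate_role_grants(std::vector<Role> &acl_roles,
                           const std::function<void(Role &)> &traverse_up)
{
  for (Role &role : acl_roles)   // stands in for my_hash_iterate()
    if (role.counter == 0)       // only start from roles nobody merged yet
      traverse_up(role);         // traverse_role_graph_up(...) in the server
}
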
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index 2ac158817cb..76c753cf0b4 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -1252,6 +1252,16 @@ int wsrep_to_buf_helper(
if (!ret && writer.write(&gtid_ev)) ret= 1;
}
#endif /* GTID_SUPPORT */
+ if (wsrep_gtid_mode && thd->variables.gtid_seq_no)
+ {
+ Gtid_log_event gtid_event(thd, thd->variables.gtid_seq_no,
+ thd->variables.gtid_domain_id,
+ true, LOG_EVENT_SUPPRESS_USE_F,
+ true, 0);
+ gtid_event.server_id= thd->variables.server_id;
+ if (!gtid_event.is_valid()) ret= 0;
+ ret= writer.write(&gtid_event);
+ }
/* if there is prepare query, add event for it */
if (!ret && thd->wsrep_TOI_pre_query)
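
The wsrep_mysqld.cc hunk adds the matching step on the TOI replication path: when wsrep GTID mode is on and the session carries a forwarded gtid_seq_no, a GTID event built from the session's seq_no, domain_id and server_id is written into the replication buffer ahead of the statement events. A sketch with hypothetical stand-ins for THD, Gtid_log_event and Log_event_writer:

// Sketch only; appending to a vector stands in for writer.write(&gtid_event).
#include <cstdint>
#include <vector>

struct SessionVars {
  bool          wsrep_gtid_mode = false;
  std::uint64_t gtid_seq_no     = 0;   // non-zero when forwarded from a master
  std::uint32_t gtid_domain_id  = 0;
  std::uint32_t server_id       = 0;
};

struct GtidEvent {
  std::uint64_t seq_no;
  std::uint32_t domain_id;
  std::uint32_t server_id;
};

int write_toi_events(const SessionVars &v, std::vector<GtidEvent> &buf)
{
  int ret = 0;
  if (v.wsrep_gtid_mode && v.gtid_seq_no) {
    // Prepend the forwarded GTID before the statement events.
    buf.push_back(GtidEvent{v.gtid_seq_no, v.gtid_domain_id, v.server_id});
  }
  // ... the prepare query / statement events would follow here ...
  return ret;
}
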