author     Zhao Chao <zhaochao1984@gmail.com>    2018-03-08 17:09:14 +0800
committer  Zhao Chao <zhaochao1984@gmail.com>    2018-03-09 10:51:41 +0800
commit     5895cf0ee99022e2910ec8c72393fe998f5860f8 (patch)
tree       4ea64544c2eec93f7c27176f6af246036cc9e40c /trove/taskmanager/manager.py
parent     ad496c07980aa8d17eac6a1670f9e7c8ca816864 (diff)
download   trove-5895cf0ee99022e2910ec8c72393fe998f5860f8.tar.gz
Avoid diverged slave when migrating MariaDB master
When promoting a slave to be the new master in a replication group, the old master was previously attached to the new one right after the new master came up. For MariaDB, while the old master is being reattached, new GTIDs may be created on it and synced to some of the other replicas, because those replicas are still connected to the old master. These GTIDs do not exist on the new master, so the affected slaves diverge from it. When such a diverged slave later connects to the new master, 'START SLAVE' fails with logs like:

  [ERROR] Error reading packet from server: Error: connecting slave
  requested to start from GTID X-XXXXXXXXXX-XX, which is not in the
  master's binlog. Since the master's binlog contains GTIDs with higher
  sequence numbers, it probably means that the slave has diverged due to
  executing extra erroneous transactions (server_errno=1236)

These slaves are then left orphaned and in an error state after promote_to_replica_source finishes.

Attaching the other replicas to the new master before dealing with the old master fixes this problem, and also fixes the failure of the trove-scenario-mariadb-multi Zuul job.

Closes-Bug: #1754539
Change-Id: Ib9c01b07c832f117f712fd613ae55c7de3561116
Signed-off-by: Zhao Chao <zhaochao1984@gmail.com>
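The divergence the error log complains about can be reasoned about from the MariaDB GTID positions: a replica is in trouble when its GTID state records transactions that are absent from the new master's binlog. The snippet below is an illustrative diagnostic sketch for that condition, not part of this change; the hostnames, credentials, and helper names are placeholders, and the comparison is only a heuristic based on the per-domain sequence numbers mentioned in the error message.

# Illustrative only: check whether a MariaDB replica's GTID state looks
# diverged from the new master's binlog, in the sense of the 1236 error
# quoted above. Hosts, credentials, and helper names are placeholders.
import pymysql


def fetch_gtid_pos(host, variable, user="trove", password="secret"):
    # Return a MariaDB GTID position string such as '0-1-42,1-2-77'.
    conn = pymysql.connect(host=host, user=user, password=password)
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT @@global.%s" % variable)
            row = cur.fetchone()
            return row[0] or ""
    finally:
        conn.close()


def parse_gtid_pos(pos):
    # Map domain id -> (server id, sequence) for a MariaDB GTID list.
    parsed = {}
    for gtid in filter(None, pos.split(",")):
        domain, server_id, seq = gtid.split("-")
        parsed[int(domain)] = (int(server_id), int(seq))
    return parsed


def replica_looks_diverged(master_host, replica_host):
    # Heuristic: the replica is suspicious if, for any replication domain,
    # it records a higher sequence number than the new master's binlog
    # position, or the same sequence from a different server id.
    master = parse_gtid_pos(fetch_gtid_pos(master_host, "gtid_binlog_pos"))
    replica = parse_gtid_pos(fetch_gtid_pos(replica_host, "gtid_current_pos"))
    for domain, (server_id, seq) in replica.items():
        if domain not in master:
            return True
        master_server_id, master_seq = master[domain]
        if seq > master_seq or (seq == master_seq
                                and server_id != master_server_id):
            return True
    return False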
Diffstat (limited to 'trove/taskmanager/manager.py')
-rw-r--r--   trove/taskmanager/manager.py   26
1 file changed, 24 insertions(+), 2 deletions(-)
diff --git a/trove/taskmanager/manager.py b/trove/taskmanager/manager.py
index e600e7ad..7559c1ec 100644
--- a/trove/taskmanager/manager.py
+++ b/trove/taskmanager/manager.py
@@ -99,6 +99,26 @@ class Manager(periodic_task.PeriodicTasks):
replica_models):
# First, we transition from the old master to new as quickly as
# possible to minimize the scope of unrecoverable error
+
+ # NOTE(zhaochao): we cannot reattach the old master to the new
+ # one immediately after the new master is up, because for MariaDB
+ # the other replicas are still connected to the old master, and
+ # while the old master is being reattached as a slave, new GTIDs
+ # may be created and synced to the replicas. After that, when
+ # attaching the replicas to the new master, 'START SLAVE' will
+ # fail with 'fatal error 1236' if the binlog of the replica has
+ # diverged from the new master. So the proper order should be:
+ # -1. make the old master read only (and detach floating ips)
+ # -2. make sure the new master is up-to-date
+ # -3. detach the new master from the old one
+ # -4. enable the new master (and attach floating ips)
+ # -5. attach the other replicas to the new master
+ # -6. attach the old master to the new one
+ # (and attach floating ips)
+ # -7. demote the old master
+ # What we changed here is the order of the 6th step; previously
+ # this step took place right after step 4, which caused failures
+ # with MariaDB replication.
old_master.make_read_only(True)
master_ips = old_master.detach_public_ips()
slave_ips = master_candidate.detach_public_ips()
@@ -106,10 +126,8 @@ class Manager(periodic_task.PeriodicTasks):
master_candidate.wait_for_txn(latest_txn_id)
master_candidate.detach_replica(old_master, for_failover=True)
master_candidate.enable_as_master()
- old_master.attach_replica(master_candidate)
master_candidate.attach_public_ips(master_ips)
master_candidate.make_read_only(False)
- old_master.attach_public_ips(slave_ips)
# At this point, should something go wrong, there
# should be a working master with some number of working slaves,
@@ -138,6 +156,10 @@ class Manager(periodic_task.PeriodicTasks):
error_messages += "%s (%s)\n" % (
exc_fmt % msg_content, ex)
+ # Deal with the old master after all the other replicas
+ # have been migrated.
+ old_master.attach_replica(master_candidate)
+ old_master.attach_public_ips(slave_ips)
try:
old_master.demote_replication_master()
except Exception as ex:
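Read end to end, the reordered flow spelled out in the NOTE comment of this patch condenses to roughly the following sequence. This is a simplified sketch rather than the actual trove code: error handling, status updates, and most floating-IP bookkeeping are trimmed, and get_latest_txn_id() plus the exact replica loop are assumed from the surrounding manager code that this hunk does not show.

def promote_to_replica_source_sketch(old_master, master_candidate, replicas):
    # Simplified ordering only; the real logic lives in
    # trove/taskmanager/manager.py with full error handling.

    # Step 1: make the old master read only and take its floating IPs.
    old_master.make_read_only(True)
    master_ips = old_master.detach_public_ips()
    slave_ips = master_candidate.detach_public_ips()

    # Step 2: make sure the new master is up to date.
    latest_txn_id = old_master.get_latest_txn_id()  # assumed helper
    master_candidate.wait_for_txn(latest_txn_id)

    # Steps 3-4: detach and enable the new master, give it the IPs.
    master_candidate.detach_replica(old_master, for_failover=True)
    master_candidate.enable_as_master()
    master_candidate.attach_public_ips(master_ips)
    master_candidate.make_read_only(False)

    # Step 5: attach the other replicas to the new master first, so any
    # GTIDs created on the old master later cannot reach them.
    for replica in replicas:
        replica.detach_replica(old_master, for_failover=True)
        replica.attach_replica(master_candidate)

    # Steps 6-7: only now reattach and demote the old master.
    old_master.attach_replica(master_candidate)
    old_master.attach_public_ips(slave_ips)
    old_master.demote_replication_master()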