path: root/src/redis-trib.rb
author     antirez <antirez@gmail.com>    2013-11-05 11:24:24 +0100
committer  antirez <antirez@gmail.com>    2013-11-05 11:24:24 +0100
commit     6cf230ea91619f1a90f1df2ffd0de023458bf9e5 (patch)
tree       b760001700bb89d4de9822a47d1a803bafb4c981 /src/redis-trib.rb
parent     9fba193a5995208f911b7184017df780d155c685 (diff)
download   redis-6cf230ea91619f1a90f1df2ffd0de023458bf9e5.tar.gz
Initial support for --replicas in redis-trib.
Diffstat (limited to 'src/redis-trib.rb')
-rwxr-xr-x  src/redis-trib.rb  123
1 file changed, 109 insertions, 14 deletions
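For orientation, the new option is meant to be passed to the create subcommand; a hypothetical invocation and the master/replica split it implies (hosts and ports below are placeholders, not taken from this commit) could look like this:

    # Hypothetical invocation (addresses are placeholders):
    #   ./redis-trib.rb create --replicas 1 \
    #       127.0.0.1:7000 127.0.0.1:7001 127.0.0.1:7002 \
    #       127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005
    #
    # With 6 nodes and one replica per master the script derives:
    nodes    = 6
    replicas = 1
    masters  = nodes / (replicas + 1)       # => 3 masters
    puts "#{masters} masters, #{nodes - masters} replicas"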
diff --git a/src/redis-trib.rb b/src/redis-trib.rb
index 76eb6f436..b6bf2980c 100755
--- a/src/redis-trib.rb
+++ b/src/redis-trib.rb
@@ -61,6 +61,7 @@ class ClusterNode
@info[:slots] = {}
@info[:migrating] = {}
@info[:importing] = {}
+ @info[:replicate] = false
@dirty = false # True if we need to flush slots info into node.
@friends = []
end
@@ -172,16 +173,33 @@ class ClusterNode
@dirty = true
end
+ def set_as_replica(node_id)
+ @info[:replicate] = node_id
+ @dirty = true
+ end
+
def flush_node_config
return if !@dirty
- new = []
- @info[:slots].each{|s,val|
- if val == :new
- new << s
- @info[:slots][s] = true
+ if @info[:replicate]
+ begin
+ @r.cluster("replicate",@info[:replicate])
+ rescue
+ # If the cluster has not already joined it is possible that
+ # the slave does not know the master node yet. So on errors
+ # we return ASAP leaving the dirty flag set, to flush the
+ # config later.
+ return
end
- }
- @r.cluster("addslots",*new)
+ else
+ new = []
+ @info[:slots].each{|s,val|
+ if val == :new
+ new << s
+ @info[:slots][s] = true
+ end
+ }
+ @r.cluster("addslots",*new)
+ end
@dirty = false
end
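The deferred flush above relies on a later pass retrying nodes that are still dirty (see the extra flush_nodes_config call added near the end of cluster creation). A minimal standalone sketch of that idea, using a hypothetical Node stand-in rather than the real ClusterNode class:

    # Sketch only: a node whose config change fails keeps its dirty flag
    # and is simply retried on the next flush pass.
    class Node
      attr_reader :dirty

      def initialize(&apply)
        @apply = apply        # block performing the actual config change
        @dirty = true
      end

      def flush
        return if !@dirty
        begin
          @apply.call
        rescue
          return              # keep @dirty set; a later pass will retry
        end
        @dirty = false
      end
    end

    attempts = 0
    node = Node.new { attempts += 1; raise "master not known yet" if attempts == 1 }
    node.flush                # first pass fails, node stays dirty
    node.flush                # second pass succeeds and clears the flag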
@@ -218,9 +236,14 @@ class ClusterNode
}.join(",")
role = self.has_flag?("master") ? "M" : "S"
- "#{role}: #{self.info[:name]} #{self.to_s}\n"+
- " slots:#{slots} (#{self.slots.length} slots) "+
- "#{(self.info[:flags]-["myself"]).join(",")}"
+
+ if self.info[:replicate] and @dirty
+ "S: #{self.info[:name]} #{self.to_s}"
+ else
+ "#{role}: #{self.info[:name]} #{self.to_s}\n"+
+ " slots:#{slots} (#{self.slots.length} slots) "+
+ "#{(self.info[:flags]-["myself"]).join(",")}"
+ end
end
# Return a single string representing nodes and associated slots.
@@ -460,15 +483,68 @@ class RedisTrib
end
def alloc_slots
- slots_per_node = ClusterHashSlots/@nodes.length
- i = 0
+ nodes_count = @nodes.length
+ masters_count = @nodes.length / (@replicas+1)
+ slots_per_node = ClusterHashSlots / masters_count
+ masters = []
+ slaves = []
+
+ # The first step is to split instances by IP. This is useful as
+ # we'll try to allocate master nodes in different physical machines
+ # (as much as possible) and to allocate slaves of a given master in
+ # different physical machines as well.
+ #
+ # This code simply assumes that if the IP is different, then it is more
+ # likely that the instance is running on a different physical host
+ # or at least in a different virtual machine.
+ ips = {}
@nodes.each{|n|
+ ips[n.info[:host]] = [] if !ips[n.info[:host]]
+ ips[n.info[:host]] << n
+ }
+
+ # Select master instances
+ puts "Using #{masters_count} masters:"
+ while masters.length < masters_count
+ ips.each{|ip,nodes_list|
+ next if nodes_list.length == 0
+ masters << nodes_list.shift
+ puts masters[-1]
+ nodes_count -= 1
+ break if masters.length == masters_count
+ }
+ end
+
+ # Alloc slots on masters
+ i = 0
+ masters.each{|n|
first = i*slots_per_node
last = first+slots_per_node-1
last = ClusterHashSlots-1 if i == masters.length-1
n.add_slots first..last
i += 1
}
+
+ # Select N replicas for every master.
+ # We try to split the replicas among all the IPs with spare nodes
+ # trying to avoid the host where the master is running, if possible.
+ masters.each{|m|
+ i = 0
+ while i < @replicas
+ ips.each{|ip,nodes_list|
+ next if nodes_list.length == 0
+ # Skip instances with the same IP as the master if we
+ # have some more IPs available.
+ next if ip == m.info[:host] && nodes_count > nodes_list.length
+ slave = nodes_list.shift
+ slave.set_as_replica(m.info[:name])
+ nodes_count -= 1
+ i += 1
+ puts "#{m} replica ##{i} is #{slave}"
+ break if masters.length == masters_count
+ }
+ end
+ }
end
def flush_nodes_config
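The anti-affinity logic in alloc_slots first groups candidate nodes by IP, then picks masters round-robin across the groups. The same idea in isolation, as a standalone sketch with made-up addresses and no ClusterNode involved:

    # Sketch only: spread masters across hosts as evenly as possible.
    addrs = %w[10.0.0.1:7000 10.0.0.1:7001 10.0.0.2:7000 10.0.0.2:7001
               10.0.0.3:7000 10.0.0.3:7001]
    replicas = 1
    masters_count = addrs.length / (replicas + 1)

    ips = Hash.new { |h, k| h[k] = [] }
    addrs.each { |a| ips[a.split(":").first] << a }

    masters = []
    while masters.length < masters_count
      ips.each_value do |list|
        next if list.empty?
        masters << list.shift
        break if masters.length == masters_count
      end
    end

    p masters   # => ["10.0.0.1:7000", "10.0.0.2:7000", "10.0.0.3:7000"]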
@@ -667,7 +743,24 @@ class RedisTrib
}
end
+ # This is a helper function for create_cluster_cmd that verifies that
+ # the number of nodes and the requested replicas form a valid configuration,
+ # with at least three master nodes and enough replicas per node.
+ def check_create_parameters
+ masters = @nodes.length/(@replicas+1)
+ if masters < 3
+ puts "*** ERROR: Invalid configuration for cluster creation."
+ puts "*** Redis Cluster requires at least 3 master nodes."
+ puts "*** This is not possible with #{@nodes.length} nodes and #{@replicas} replicas per node."
+ puts "*** At least #{3*(@replicas+1)} nodes are required."
+ exit 1
+ end
+ end
+
def create_cluster_cmd(argv,opt)
+ opt = {'replicas' => 0}.merge(opt)
+ @replicas = opt['replicas'].to_i
+
xputs ">>> Creating cluster"
argv[0..-1].each{|n|
node = ClusterNode.new(n)
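As a quick check of the arithmetic behind check_create_parameters (illustrative numbers only): with 5 nodes and one replica per master, integer division yields just 2 masters, so creation is refused and at least 6 nodes would be needed.

    nodes, replicas = 5, 1
    masters = nodes / (replicas + 1)    # => 2, below the 3-master minimum
    minimum = 3 * (replicas + 1)        # => 6 nodes required
    puts "only #{masters} masters; at least #{minimum} nodes required" if masters < 3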
@@ -677,6 +770,7 @@ class RedisTrib
node.assert_empty
add_node(node)
}
+ check_create_parameters
xputs ">>> Performing hash slots allocation on #{@nodes.length} nodes..."
alloc_slots
show_nodes
@@ -690,6 +784,7 @@ class RedisTrib
# they are still empty with unassigned slots.
sleep 1
wait_cluster_join
+ flush_nodes_config # Useful for the replicas
check_cluster
end
@@ -758,11 +853,11 @@ COMMANDS={
}
ALLOWED_OPTIONS={
- "create" => {"slaves" => false}
+ "create" => {"replicas" => true}
}
def show_help
- puts "Usage: redis-trib <command> <arguments ...>"
+ puts "Usage: redis-trib <command> <options> <arguments ...>"
puts
COMMANDS.each{|k,v|
puts " #{k.ljust(10)} #{v[2]}"